diff --git a/lib/trino-hdfs/src/test/java/io/trino/hdfs/TestFSDataInputStreamTail.java b/lib/trino-hdfs/src/test/java/io/trino/hdfs/TestFSDataInputStreamTail.java index 2a509e778f8d..d1bb545d5058 100644 --- a/lib/trino-hdfs/src/test/java/io/trino/hdfs/TestFSDataInputStreamTail.java +++ b/lib/trino-hdfs/src/test/java/io/trino/hdfs/TestFSDataInputStreamTail.java @@ -24,7 +24,6 @@ import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; -import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.File; @@ -85,8 +84,22 @@ public void testEmptyFileReadTail() } } - @Test(dataProvider = "validFileSizeAndPaddedFileSize") - public void testReadTailForFileSize(int fileSize, int paddedFileSize) + @Test + public void testReadTailForFileSize() + throws Exception + { + testReadTailForFileSize(0, 0); + testReadTailForFileSize(0, 1); + testReadTailForFileSize(0, 15); + testReadTailForFileSize(0, FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES - 1); + testReadTailForFileSize(0, FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES); + testReadTailForFileSize(63, 63); + testReadTailForFileSize(63, 64); + testReadTailForFileSize(64, 74); + testReadTailForFileSize(65, 65 + FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES); + } + + private void testReadTailForFileSize(int fileSize, int paddedFileSize) throws Exception { // Cleanup between each input run @@ -105,10 +118,26 @@ public void testReadTailForFileSize(int fileSize, int paddedFileSize) } } - @Test(dataProvider = "validFileSizeAndPaddedFileSize") - public void testReadTailCompletely(int fileSize, int paddedFileSize) + @Test + public void testReadTailCompletely() + throws Exception + { + testReadTailCompletely(0, 0); + testReadTailCompletely(0, 1); + testReadTailCompletely(0, 15); + testReadTailCompletely(0, FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES - 1); + testReadTailCompletely(0, 
FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES); + testReadTailCompletely(63, 63); + testReadTailCompletely(63, 64); + testReadTailCompletely(64, 74); + testReadTailCompletely(65, 65 + FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES); + } + + private void testReadTailCompletely(int fileSize, int paddedFileSize) throws Exception { + fs.truncate(tempFile, 0); + byte[] contents = countingTestFileContentsWithLength(fileSize); if (contents.length > 0) { try (FSDataOutputStream os = fs.append(tempFile)) { @@ -123,7 +152,8 @@ public void testReadTailCompletely(int fileSize, int paddedFileSize) assertEquals(tail.getFileSize(), fileSize); Slice tailSlice = tail.getTailSlice(); assertEquals(tailSlice.length(), fileSize); - assertCountingTestFileContents(tailSlice.getBytes()); + byte[] tailContents = tailSlice.getBytes(); + assertEquals(tailContents, countingTestFileContentsWithLength(tailContents.length)); } } @@ -184,26 +214,6 @@ public void testReadTailForFileSizeNoEndOfFileFound() } } - @DataProvider(name = "validFileSizeAndPaddedFileSize") - public static Object[][] validFileSizeAndPaddedFileSize() - { - return new Object[][] { - {0, 0}, - {0, 1}, - {0, 15}, - {0, FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES - 1}, - {0, FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES}, - {63, 63}, - {63, 64}, - {64, 74}, - {65, 65 + FSDataInputStreamTail.MAX_SUPPORTED_PADDING_BYTES}}; - } - - private static void assertCountingTestFileContents(byte[] contents) - { - assertEquals(contents, countingTestFileContentsWithLength(contents.length)); - } - private static byte[] countingTestFileContentsWithLength(int length) { byte[] contents = new byte[length]; diff --git a/lib/trino-hdfs/src/test/java/io/trino/hdfs/rubix/TestRubixCaching.java b/lib/trino-hdfs/src/test/java/io/trino/hdfs/rubix/TestRubixCaching.java index b6aff0df6f9a..870acf1adf5c 100644 --- a/lib/trino-hdfs/src/test/java/io/trino/hdfs/rubix/TestRubixCaching.java +++ 
b/lib/trino-hdfs/src/test/java/io/trino/hdfs/rubix/TestRubixCaching.java @@ -53,7 +53,6 @@ import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; -import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import javax.management.MBeanServer; @@ -276,12 +275,6 @@ private static void closeFileSystem(FileSystem fileSystem) fileSystem.close(); } - @DataProvider - public static Object[][] readMode() - { - return new Object[][] {{ASYNC}, {READ_THROUGH}}; - } - @Test public void testCoordinatorNotJoining() { @@ -340,135 +333,153 @@ public void testGetBlockLocations() assertEquals(file2Locations[0].getHosts()[0], "127.0.0.2"); } - @Test(dataProvider = "readMode") - public void testCacheRead(ReadMode readMode) + @Test + public void testCacheRead() throws Exception { - RubixConfig rubixConfig = new RubixConfig().setReadMode(readMode); - initializeCachingFileSystem(rubixConfig); - byte[] randomData = new byte[toIntExact(SMALL_FILE_SIZE.toBytes())]; - new Random().nextBytes(randomData); + for (ReadMode readMode : ReadMode.values()) { + deinitializeRubix(); + + RubixConfig rubixConfig = new RubixConfig().setReadMode(readMode); + initializeCachingFileSystem(rubixConfig); + byte[] randomData = new byte[toIntExact(SMALL_FILE_SIZE.toBytes())]; + new Random().nextBytes(randomData); - Path file = getStoragePath("some_file"); - writeFile(nonCachingFileSystem.create(file), randomData); + Path file = getStoragePath("some_file"); + writeFile(nonCachingFileSystem.create(file), randomData); - long beforeRemoteReadsCount = getRemoteReadsCount(); - long beforeCachedReadsCount = getCachedReadsCount(); - long beforeAsyncDownloadedMb = getAsyncDownloadedMb(readMode); + long beforeRemoteReadsCount = getRemoteReadsCount(); + long beforeCachedReadsCount = getCachedReadsCount(); + long beforeAsyncDownloadedMb = getAsyncDownloadedMb(readMode); - assertFileContents(cachingFileSystem, file, randomData); + 
assertFileContents(cachingFileSystem, file, randomData); - if (readMode == ASYNC) { - // wait for async Rubix requests to complete + if (readMode == ASYNC) { + // wait for async Rubix requests to complete + assertEventually( + new Duration(10, SECONDS), + () -> assertEquals(getAsyncDownloadedMb(ASYNC), beforeAsyncDownloadedMb + 1)); + } + + // stats are propagated asynchronously assertEventually( new Duration(10, SECONDS), - () -> assertEquals(getAsyncDownloadedMb(ASYNC), beforeAsyncDownloadedMb + 1)); - } + () -> { + // data should be read from remote source only + assertGreaterThan(getRemoteReadsCount(), beforeRemoteReadsCount); + assertEquals(getCachedReadsCount(), beforeCachedReadsCount); + }); - // stats are propagated asynchronously - assertEventually( - new Duration(10, SECONDS), - () -> { - // data should be read from remote source only - assertGreaterThan(getRemoteReadsCount(), beforeRemoteReadsCount); - assertEquals(getCachedReadsCount(), beforeCachedReadsCount); - }); - - // ensure that subsequent read uses cache exclusively - assertEventually( - new Duration(10, SECONDS), - () -> { - long remoteReadsCount = getRemoteReadsCount(); - assertFileContents(cachingFileSystem, file, randomData); - assertGreaterThan(getCachedReadsCount(), beforeCachedReadsCount); - assertEquals(getRemoteReadsCount(), remoteReadsCount); - }); + // ensure that subsequent read uses cache exclusively + assertEventually( + new Duration(10, SECONDS), + () -> { + long remoteReadsCount = getRemoteReadsCount(); + assertFileContents(cachingFileSystem, file, randomData); + assertGreaterThan(getCachedReadsCount(), beforeCachedReadsCount); + assertEquals(getRemoteReadsCount(), remoteReadsCount); + }); + + closeRubix(); + } } - @Test(dataProvider = "readMode") - public void testCacheWrite(ReadMode readMode) + @Test + public void testCacheWrite() throws Exception { - initializeCachingFileSystem(new RubixConfig().setReadMode(readMode)); - Path file = getStoragePath("some_file_write"); + for 
(ReadMode readMode : ReadMode.values()) { + deinitializeRubix(); + + initializeCachingFileSystem(new RubixConfig().setReadMode(readMode)); + Path file = getStoragePath("some_file_write"); - byte[] data = "Hello world".getBytes(UTF_8); - writeFile(cachingFileSystem.create(file), data); - assertFileContents(cachingFileSystem, file, data); + byte[] data = "Hello world".getBytes(UTF_8); + writeFile(cachingFileSystem.create(file), data); + assertFileContents(cachingFileSystem, file, data); + + closeRubix(); + } } - @Test(dataProvider = "readMode") - public void testLargeFile(ReadMode readMode) + @Test + public void testLargeFile() throws Exception { - initializeCachingFileSystem(new RubixConfig().setReadMode(readMode)); - byte[] randomData = new byte[toIntExact(LARGE_FILE_SIZE.toBytes())]; - new Random().nextBytes(randomData); + for (ReadMode readMode : ReadMode.values()) { + deinitializeRubix(); + + initializeCachingFileSystem(new RubixConfig().setReadMode(readMode)); + byte[] randomData = new byte[toIntExact(LARGE_FILE_SIZE.toBytes())]; + new Random().nextBytes(randomData); - Path file = getStoragePath("large_file"); - writeFile(nonCachingFileSystem.create(file), randomData); + Path file = getStoragePath("large_file"); + writeFile(nonCachingFileSystem.create(file), randomData); - long beforeRemoteReadsCount = getRemoteReadsCount(); - long beforeCachedReadsCount = getCachedReadsCount(); - long beforeAsyncDownloadedMb = getAsyncDownloadedMb(readMode); + long beforeRemoteReadsCount = getRemoteReadsCount(); + long beforeCachedReadsCount = getCachedReadsCount(); + long beforeAsyncDownloadedMb = getAsyncDownloadedMb(readMode); - assertFileContents(cachingFileSystem, file, randomData); + assertFileContents(cachingFileSystem, file, randomData); - if (readMode == ASYNC) { - // wait for async Rubix requests to complete + if (readMode == ASYNC) { + // wait for async Rubix requests to complete + assertEventually( + new Duration(10, SECONDS), + () -> 
assertEquals(getAsyncDownloadedMb(ASYNC), beforeAsyncDownloadedMb + 100)); + } + + // stats are propagated asynchronously assertEventually( new Duration(10, SECONDS), - () -> assertEquals(getAsyncDownloadedMb(ASYNC), beforeAsyncDownloadedMb + 100)); - } + () -> { + // data should be fetched from remote source + assertGreaterThan(getRemoteReadsCount(), beforeRemoteReadsCount); + }); - // stats are propagated asynchronously - assertEventually( - new Duration(10, SECONDS), - () -> { - // data should be fetched from remote source - assertGreaterThan(getRemoteReadsCount(), beforeRemoteReadsCount); - }); - - // ensure that subsequent read uses cache exclusively - assertEventually( - new Duration(10, SECONDS), - () -> { - long remoteReadsCount = getRemoteReadsCount(); - assertFileContents(cachingFileSystem, file, randomData); - assertGreaterThan(getCachedReadsCount(), beforeCachedReadsCount); - assertEquals(getRemoteReadsCount(), remoteReadsCount); - }); - long secondCachedReadsCount = getCachedReadsCount(); - long secondRemoteReadsCount = getRemoteReadsCount(); - - // make sure parallel reading of large file works - ExecutorService executorService = newFixedThreadPool(3); - try { - List> reads = nCopies( - 3, + // ensure that subsequent read uses cache exclusively + assertEventually( + new Duration(10, SECONDS), () -> { + long remoteReadsCount = getRemoteReadsCount(); assertFileContents(cachingFileSystem, file, randomData); - return null; + assertGreaterThan(getCachedReadsCount(), beforeCachedReadsCount); + assertEquals(getRemoteReadsCount(), remoteReadsCount); }); - List> futures = reads.stream() - .map(executorService::submit) - .collect(toImmutableList()); - for (Future future : futures) { - future.get(); + long secondCachedReadsCount = getCachedReadsCount(); + long secondRemoteReadsCount = getRemoteReadsCount(); + + // make sure parallel reading of large file works + ExecutorService executorService = newFixedThreadPool(3); + try { + List> reads = nCopies( + 3, + () 
-> { + assertFileContents(cachingFileSystem, file, randomData); + return null; + }); + List> futures = reads.stream() + .map(executorService::submit) + .collect(toImmutableList()); + for (Future future : futures) { + future.get(); + } } - } - finally { - executorService.shutdownNow(); - } + finally { + executorService.shutdownNow(); + } + + // stats are propagated asynchronously + assertEventually( + new Duration(10, SECONDS), + () -> { + // data should be read from cache only + assertGreaterThan(getCachedReadsCount(), secondCachedReadsCount); + assertEquals(getRemoteReadsCount(), secondRemoteReadsCount); + }); - // stats are propagated asynchronously - assertEventually( - new Duration(10, SECONDS), - () -> { - // data should be read from cache only - assertGreaterThan(getCachedReadsCount(), secondCachedReadsCount); - assertEquals(getRemoteReadsCount(), secondRemoteReadsCount); - }); + closeRubix(); + } } @SuppressModernizer diff --git a/plugin/trino-accumulo/src/test/java/io/trino/plugin/accumulo/TestAccumuloConnectorTest.java b/plugin/trino-accumulo/src/test/java/io/trino/plugin/accumulo/TestAccumuloConnectorTest.java index dd5b96d8c4d1..1f89baca5457 100644 --- a/plugin/trino-accumulo/src/test/java/io/trino/plugin/accumulo/TestAccumuloConnectorTest.java +++ b/plugin/trino-accumulo/src/test/java/io/trino/plugin/accumulo/TestAccumuloConnectorTest.java @@ -21,7 +21,6 @@ import io.trino.testing.sql.TestTable; import org.intellij.lang.annotations.Language; import org.junit.jupiter.api.Test; -import org.testng.SkipException; import java.util.Optional; @@ -29,6 +28,7 @@ import static io.trino.spi.type.VarcharType.VARCHAR; import static io.trino.testing.MaterializedResult.resultBuilder; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; @@ -78,9 +78,10 @@ protected 
boolean hasBehavior(TestingConnectorBehavior connectorBehavior) @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Accumulo connector does not support column default values"); + return abort("Accumulo connector does not support column default values"); } + @Test @Override public void testCreateTableAsSelect() { @@ -115,6 +116,7 @@ public void testCreateTableAsSelect() "SELECT 0"); } + @Test @Override public void testInsert() { @@ -151,6 +153,7 @@ public void testInsert() assertUpdate("DROP TABLE test_insert"); } + @Test @Override // Overridden because we currently do not support arrays with null elements public void testInsertArray() { @@ -297,6 +300,7 @@ protected Optional filterCaseSensitiveDataMappingTestData( return Optional.of(dataMappingTestSetup); } + @Test @Override public void testCharVarcharComparison() { diff --git a/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/BaseJdbcConnectorTest.java b/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/BaseJdbcConnectorTest.java index 01ccd6290212..4c1f14eacdb0 100644 --- a/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/BaseJdbcConnectorTest.java +++ b/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/BaseJdbcConnectorTest.java @@ -42,9 +42,10 @@ import io.trino.testing.sql.TestTable; import io.trino.testing.sql.TestView; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.Timeout; import java.util.ArrayList; import java.util.List; @@ -116,7 +117,10 @@ import static java.util.concurrent.TimeUnit.MINUTES; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; 
+import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; +@TestInstance(PER_CLASS) public abstract class BaseJdbcConnectorTest extends BaseConnectorTest { @@ -124,7 +128,7 @@ public abstract class BaseJdbcConnectorTest protected abstract SqlExecutor onRemoteDatabase(); - @AfterClass(alwaysRun = true) + @AfterAll public void afterClass() { executor.shutdownNow(); @@ -167,7 +171,7 @@ public void testInsertInPresenceOfNotSupportedColumn() protected TestTable createTableWithUnsupportedColumn() { // TODO throw new UnsupportedOperationException(); - throw new SkipException("Not implemented"); + return abort("Not implemented"); } // TODO move common tests from connector-specific classes here @@ -577,7 +581,7 @@ public void testNumericAggregationPushdown() public void testCountDistinctWithStringTypes() { if (!(hasBehavior(SUPPORTS_CREATE_TABLE) && hasBehavior(SUPPORTS_INSERT))) { - throw new SkipException("Unable to CREATE TABLE to test count distinct"); + abort("Unable to CREATE TABLE to test count distinct"); } List rows = Stream.of("a", "b", "A", "B", " a ", "a", "b", " b ", "ą") @@ -643,7 +647,7 @@ public void testStddevAggregationPushdown() String schemaName = getSession().getSchema().orElseThrow(); if (!hasBehavior(SUPPORTS_AGGREGATION_PUSHDOWN_STDDEV)) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("Unable to CREATE TABLE to test aggregation pushdown"); + abort("Unable to CREATE TABLE to test aggregation pushdown"); } try (TestTable testTable = createTableWithDoubleAndRealColumns(schemaName + ".test_stddev_pushdown", ImmutableList.of())) { @@ -694,7 +698,7 @@ public void testVarianceAggregationPushdown() String schemaName = getSession().getSchema().orElseThrow(); if (!hasBehavior(SUPPORTS_AGGREGATION_PUSHDOWN_VARIANCE)) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("Unable to CREATE TABLE to test aggregation pushdown"); + abort("Unable to CREATE TABLE to test aggregation pushdown"); } try (TestTable 
testTable = createTableWithDoubleAndRealColumns(schemaName + ".test_var_pushdown", ImmutableList.of())) { @@ -738,7 +742,7 @@ public void testCovarianceAggregationPushdown() String schemaName = getSession().getSchema().orElseThrow(); if (!hasBehavior(SUPPORTS_AGGREGATION_PUSHDOWN_COVARIANCE)) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("Unable to CREATE TABLE to test aggregation pushdown"); + abort("Unable to CREATE TABLE to test aggregation pushdown"); } try (TestTable testTable = createTableWithDoubleAndRealColumns(schemaName + ".test_covar_pushdown", ImmutableList.of())) { @@ -775,7 +779,7 @@ public void testCorrAggregationPushdown() String schemaName = getSession().getSchema().orElseThrow(); if (!hasBehavior(SUPPORTS_AGGREGATION_PUSHDOWN_CORRELATION)) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("Unable to CREATE TABLE to test aggregation pushdown"); + abort("Unable to CREATE TABLE to test aggregation pushdown"); } try (TestTable testTable = createTableWithDoubleAndRealColumns(schemaName + ".test_corr_pushdown", ImmutableList.of())) { @@ -808,7 +812,7 @@ public void testRegrAggregationPushdown() String schemaName = getSession().getSchema().orElseThrow(); if (!hasBehavior(SUPPORTS_AGGREGATION_PUSHDOWN_REGRESSION)) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("Unable to CREATE TABLE to test aggregation pushdown"); + abort("Unable to CREATE TABLE to test aggregation pushdown"); } try (TestTable testTable = createTableWithDoubleAndRealColumns(schemaName + ".test_regr_pushdown", ImmutableList.of())) { @@ -1455,12 +1459,13 @@ protected Session joinPushdownEnabled(Session session) .build(); } - @Test(timeOut = 60_000) + @Test + @Timeout(60) public void testCancellation() throws Exception { if (!hasBehavior(SUPPORTS_CANCELLATION)) { - throw new SkipException("Cancellation is not supported by given connector"); + abort("Cancellation is not supported by given connector"); } try (TestView 
sleepingView = createSleepingView(new Duration(1, MINUTES))) { @@ -1512,6 +1517,7 @@ protected TestView createSleepingView(Duration minimalSleepDuration) throw new UnsupportedOperationException(); } + @Test @Override public void testUpdateNotNullColumn() { @@ -1538,6 +1544,7 @@ public void testUpdateNotNullColumn() } } + @Test @Override public void testUpdateRowType() { @@ -1555,6 +1562,7 @@ public void testUpdateRowType() } } + @Test @Override public void testUpdateRowConcurrently() throws Exception @@ -1571,6 +1579,7 @@ public void testUpdateRowConcurrently() } } + @Test @Override public void testUpdateAllValues() { @@ -1586,6 +1595,7 @@ public void testUpdateAllValues() } } + @Test @Override public void testUpdateWithPredicates() { @@ -1744,6 +1754,7 @@ public void testDeleteWithVarcharGreaterAndLowerPredicate() } } + @Test @Override public void testDeleteWithComplexPredicate() { @@ -1756,6 +1767,7 @@ public void testDeleteWithComplexPredicate() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testDeleteWithSubquery() { @@ -1768,6 +1780,7 @@ public void testDeleteWithSubquery() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testExplainAnalyzeWithDeleteWithSubquery() { @@ -1780,6 +1793,7 @@ public void testExplainAnalyzeWithDeleteWithSubquery() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testDeleteWithSemiJoin() { @@ -1792,17 +1806,18 @@ public void testDeleteWithSemiJoin() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testDeleteWithVarcharPredicate() { - throw new SkipException("This is implemented by testDeleteWithVarcharEqualityPredicate"); + abort("This is implemented by testDeleteWithVarcharEqualityPredicate"); } @Test public void testInsertWithoutTemporaryTable() { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new 
SkipException("CREATE TABLE is required for testing non-transactional write support"); + abort("CREATE TABLE is required for testing non-transactional write support"); } Session session = Session.builder(getSession()) .setCatalogSessionProperty(getSession().getCatalog().orElseThrow(), "non_transactional_insert", "false") @@ -1832,7 +1847,7 @@ public void testWriteBatchSizeSessionProperty() private void testWriteBatchSizeSessionProperty(int batchSize, int numberOfRows) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("CREATE TABLE is required for write_batch_size test but is not supported"); + abort("CREATE TABLE is required for write_batch_size test but is not supported"); } Session session = Session.builder(getSession()) .setCatalogSessionProperty(getSession().getCatalog().orElseThrow(), "write_batch_size", Integer.toString(batchSize)) @@ -1861,7 +1876,7 @@ public void testWriteTaskParallelismSessionProperty() private void testWriteTaskParallelismSessionProperty(int parallelism, int numberOfRows) { if (!hasBehavior(SUPPORTS_CREATE_TABLE)) { - throw new SkipException("CREATE TABLE is required for write_parallelism test but is not supported"); + abort("CREATE TABLE is required for write_parallelism test but is not supported"); } Session session = Session.builder(getSession()) diff --git a/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcConnectorTest.java b/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcConnectorTest.java index ce5961f0ab33..a5e2fcbad224 100644 --- a/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcConnectorTest.java +++ b/plugin/trino-base-jdbc/src/test/java/io/trino/plugin/jdbc/TestJdbcConnectorTest.java @@ -20,8 +20,10 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.JdbcSqlExecutor; import io.trino.testing.sql.TestTable; -import org.testng.SkipException; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.parallel.Execution; +import org.junit.jupiter.api.parallel.ExecutionMode; import java.util.Map; import java.util.Optional; @@ -37,10 +39,13 @@ import static java.util.Locale.ENGLISH; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; // Single-threaded because H2 DDL operations can sometimes take a global lock, leading to apparent deadlocks // like in https://github.com/trinodb/trino/issues/7209. -@Test(singleThreaded = true) +@TestInstance(PER_CLASS) +@Execution(ExecutionMode.SAME_THREAD) public class TestJdbcConnectorTest extends BaseJdbcConnectorTest { @@ -75,8 +80,8 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) }; } + @Test @Override - @org.junit.jupiter.api.Test public void testLargeIn() { // This test should pass with H2, but takes too long (currently over a mninute) and is not that important @@ -121,6 +126,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } + @Test @Override public void testDeleteWithLike() { @@ -128,12 +134,13 @@ public void testDeleteWithLike() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testReadMetadataWithRelationsConcurrentModifications() { // Under concurrently, H2 sometimes returns null table name in DatabaseMetaData.getTables's ResultSet // See https://github.com/trinodb/trino/issues/16658 for more information - throw new SkipException("Skipped due to H2 problems"); + abort("Skipped due to H2 problems"); } @Test @@ -252,6 +259,7 @@ public void testTableWithOnlyUnsupportedColumns() } } + @Test @Override public void testNativeQueryColumnAlias() { @@ -271,11 +279,12 @@ protected void 
verifyAddNotNullColumnToNonEmptyTableFailurePermissible(Throwable assertThat(e).hasMessageContaining("NULL not allowed for column"); } + @Test @Override public void testAddColumnConcurrently() { // TODO: Difficult to determine whether the exception is concurrent issue or not from the error message - throw new SkipException("TODO: Enable this test after finding the failure cause"); + abort("TODO: Enable this test after finding the failure cause"); } @Override diff --git a/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryConnectorTest.java b/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryConnectorTest.java index f6e3f9d0a224..b1fe80f57813 100644 --- a/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryConnectorTest.java +++ b/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryConnectorTest.java @@ -22,11 +22,9 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.TestTable; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Parameters; -import org.testng.annotations.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.util.List; import java.util.Optional; @@ -45,22 +43,24 @@ import static java.util.concurrent.TimeUnit.MINUTES; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +@TestInstance(PER_CLASS) public abstract class BaseBigQueryConnectorTest extends BaseConnectorTest { protected BigQuerySqlExecutor bigQuerySqlExecutor; 
private String gcpStorageBucket; - @BeforeClass(alwaysRun = true) - @Parameters("testing.gcp-storage-bucket") - public void initBigQueryExecutor(String gcpStorageBucket) + @BeforeAll + public void initBigQueryExecutor() { this.bigQuerySqlExecutor = new BigQuerySqlExecutor(); // Prerequisite: upload region.csv in resources directory to gs://{testing.gcp-storage-bucket}/tpch/tiny/region.csv - this.gcpStorageBucket = gcpStorageBucket; + this.gcpStorageBucket = System.getProperty("testing.gcp-storage-bucket"); } @Override @@ -87,7 +87,7 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) } @Override - @org.junit.jupiter.api.Test + @Test public void testShowColumns() { assertThat(query("SHOW COLUMNS FROM orders")).matches(getDescribeOrdersResult()); @@ -110,8 +110,29 @@ protected MaterializedResult getDescribeOrdersResult() .build(); } - @Test(dataProvider = "createTableSupportedTypes") - public void testCreateTableSupportedType(String createType, String expectedType) + @Test + public void testCreateTableSupportedType() + { + testCreateTableSupportedType("boolean", "boolean"); + testCreateTableSupportedType("tinyint", "bigint"); + testCreateTableSupportedType("smallint", "bigint"); + testCreateTableSupportedType("integer", "bigint"); + testCreateTableSupportedType("bigint", "bigint"); + testCreateTableSupportedType("double", "double"); + testCreateTableSupportedType("decimal", "decimal(38,9)"); + testCreateTableSupportedType("date", "date"); + testCreateTableSupportedType("time with time zone", "time(6)"); + testCreateTableSupportedType("timestamp(6)", "timestamp(6)"); + testCreateTableSupportedType("timestamp(6) with time zone", "timestamp(6) with time zone"); + testCreateTableSupportedType("varchar", "varchar"); + testCreateTableSupportedType("varchar(65535)", "varchar"); + testCreateTableSupportedType("varbinary", "varbinary"); + testCreateTableSupportedType("array(bigint)", "array(bigint)"); + testCreateTableSupportedType("row(x bigint, y 
double)", "row(x bigint, y double)"); + testCreateTableSupportedType("row(x array(bigint))", "row(x array(bigint))"); + } + + private void testCreateTableSupportedType(String createType, String expectedType) { try (TestTable table = new TestTable(getQueryRunner()::execute, "test_create_table_supported_type_" + createType.replaceAll("[^a-zA-Z0-9]", ""), format("(col1 %s)", createType))) { assertEquals( @@ -120,48 +141,21 @@ public void testCreateTableSupportedType(String createType, String expectedType) } } - @DataProvider - public Object[][] createTableSupportedTypes() - { - return new Object[][] { - {"boolean", "boolean"}, - {"tinyint", "bigint"}, - {"smallint", "bigint"}, - {"integer", "bigint"}, - {"bigint", "bigint"}, - {"double", "double"}, - {"decimal", "decimal(38,9)"}, - {"date", "date"}, - {"time with time zone", "time(6)"}, - {"timestamp(6)", "timestamp(6)"}, - {"timestamp(6) with time zone", "timestamp(6) with time zone"}, - {"varchar", "varchar"}, - {"varchar(65535)", "varchar"}, - {"varbinary", "varbinary"}, - {"array(bigint)", "array(bigint)"}, - {"row(x bigint, y double)", "row(x bigint, y double)"}, - {"row(x array(bigint))", "row(x array(bigint))"}, - }; + @Test + public void testCreateTableUnsupportedType() + { + testCreateTableUnsupportedType("json"); + testCreateTableUnsupportedType("uuid"); + testCreateTableUnsupportedType("ipaddress"); } - @Test(dataProvider = "createTableUnsupportedTypes") - public void testCreateTableUnsupportedType(String createType) + private void testCreateTableUnsupportedType(String createType) { String tableName = format("test_create_table_unsupported_type_%s_%s", createType.replaceAll("[^a-zA-Z0-9]", ""), randomNameSuffix()); assertQueryFails(format("CREATE TABLE %s (col1 %s)", tableName, createType), "Unsupported column type: " + createType); assertUpdate("DROP TABLE IF EXISTS " + tableName); } - @DataProvider - public Object[][] createTableUnsupportedTypes() - { - return new Object[][] { - {"json"}, - {"uuid"}, - 
{"ipaddress"}, - }; - } - @Test public void testCreateTableWithRowTypeWithoutField() { @@ -284,7 +278,7 @@ public void testNoDataSystemTable() "to match regex:\n" + " \"line 1:1: Table '\\w+.\\w+.\"nation\\$data\"' does not exist\"\n" + "but did not."); - throw new SkipException("TODO"); + abort("TODO"); } @Override @@ -425,7 +419,7 @@ public void testSelectFromYearlyPartitionedTable() } } - @Test(description = "regression test for https://github.com/trinodb/trino/issues/7784") + @Test // regression test for https://github.com/trinodb/trino/issues/7784 public void testSelectWithSingleQuoteInWhereClause() { try (TestTable table = new TestTable( @@ -437,7 +431,7 @@ public void testSelectWithSingleQuoteInWhereClause() } } - @Test(description = "regression test for https://github.com/trinodb/trino/issues/5618") + @Test // regression test for https://github.com/trinodb/trino/issues/5618 public void testPredicatePushdownPrunnedColumns() { try (TestTable table = new TestTable( @@ -537,11 +531,12 @@ public void testShowCreateTable() ")"); } + @Test @Override public void testReadMetadataWithRelationsConcurrentModifications() { // TODO: Enable this test after fixing "Task did not completed before timeout" (https://github.com/trinodb/trino/issues/14230) - throw new SkipException("Test fails with a timeout sometimes and is flaky"); + abort("Test fails with a timeout sometimes and is flaky"); } @Test @@ -952,7 +947,7 @@ public void testInsertArray() public void testInsertRowConcurrently() { // TODO https://github.com/trinodb/trino/issues/15158 Enable this test after switching to storage write API - throw new SkipException("Test fails with a timeout sometimes and is flaky"); + abort("Test fails with a timeout sometimes and is flaky"); } @Override diff --git a/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/TestBigQueryAvroConnectorTest.java b/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/TestBigQueryAvroConnectorTest.java index 
bb4566271ab9..99d74666488d 100644 --- a/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/TestBigQueryAvroConnectorTest.java +++ b/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/TestBigQueryAvroConnectorTest.java @@ -16,13 +16,11 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import io.trino.testing.QueryRunner; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.Optional; import java.util.Set; -import static io.trino.testing.DataProviders.toDataProvider; import static io.trino.testing.TestingNames.randomNameSuffix; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; @@ -60,30 +58,26 @@ protected Optional filterColumnNameTestData(String columnName) } // TODO: Disable all operations for unsupported column names - @Test(dataProvider = "unsupportedColumnNameDataProvider") - public void testSelectFailsForColumnName(String columnName) + @Test + public void testSelectFailsForColumnName() { - String tableName = "test.test_unsupported_column_name" + randomNameSuffix(); + for (String columnName : UNSUPPORTED_COLUMN_NAMES) { + String tableName = "test.test_unsupported_column_name" + randomNameSuffix(); - assertUpdate("CREATE TABLE " + tableName + "(\"" + columnName + "\" varchar(50))"); - try { - assertUpdate("INSERT INTO " + tableName + " VALUES ('test value')", 1); - // The storage API can't read the table, but query based API can read it - assertThatThrownBy(() -> query("SELECT * FROM " + tableName)) - .cause() - .hasMessageMatching(".*(Illegal initial character|Invalid name).*"); - assertThat(bigQuerySqlExecutor.executeQuery("SELECT * FROM " + tableName).getValues()) - .extracting(field -> field.get(0).getStringValue()) - .containsExactly("test value"); + assertUpdate("CREATE TABLE " + tableName + "(\"" + columnName + "\" varchar(50))"); + try { 
+ assertUpdate("INSERT INTO " + tableName + " VALUES ('test value')", 1); + // The storage API can't read the table, but query based API can read it + assertThatThrownBy(() -> query("SELECT * FROM " + tableName)) + .cause() + .hasMessageMatching(".*(Illegal initial character|Invalid name).*"); + assertThat(bigQuerySqlExecutor.executeQuery("SELECT * FROM " + tableName).getValues()) + .extracting(field -> field.get(0).getStringValue()) + .containsExactly("test value"); + } + finally { + assertUpdate("DROP TABLE " + tableName); + } } - finally { - assertUpdate("DROP TABLE " + tableName); - } - } - - @DataProvider - public Object[][] unsupportedColumnNameDataProvider() - { - return UNSUPPORTED_COLUMN_NAMES.stream().collect(toDataProvider()); } } diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java index be2e072870be..aea76d462d85 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java @@ -27,9 +27,9 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.TestTable; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.math.BigDecimal; import java.math.BigInteger; @@ -72,9 +72,12 @@ import static java.util.stream.Collectors.toList; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static 
org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; +@TestInstance(PER_CLASS) public class TestCassandraConnectorTest extends BaseConnectorTest { @@ -121,7 +124,7 @@ protected QueryRunner createQueryRunner() return createCassandraQueryRunner(server, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); } - @AfterClass(alwaysRun = true) + @AfterAll public void cleanUp() { session.close(); @@ -131,7 +134,7 @@ public void cleanUp() @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Cassandra connector does not support column default values"); + return abort("Cassandra connector does not support column default values"); } @Override @@ -171,7 +174,7 @@ protected String dataMappingTableName(String trinoTypeName) return "tmp_trino_" + System.nanoTime(); } - @org.junit.jupiter.api.Test + @Test @Override public void testShowColumns() { @@ -212,6 +215,7 @@ public void testShowCreateTable() ")"); } + @Test @Override public void testCharVarcharComparison() { @@ -1297,6 +1301,7 @@ public void testDelete() } } + @Test @Override public void testDeleteWithLike() { @@ -1304,6 +1309,7 @@ public void testDeleteWithLike() .hasStackTraceContaining("Delete without primary key or partition key is not supported"); } + @Test @Override public void testDeleteWithComplexPredicate() { @@ -1311,6 +1317,7 @@ public void testDeleteWithComplexPredicate() .hasStackTraceContaining("Delete without primary key or partition key is not supported"); } + @Test @Override public void testDeleteWithSemiJoin() { @@ -1318,6 +1325,7 @@ public void testDeleteWithSemiJoin() .hasStackTraceContaining("Delete without primary key or partition key is not supported"); } + @Test @Override public void testDeleteWithSubquery() { @@ -1325,6 +1333,7 @@ public void testDeleteWithSubquery() .hasStackTraceContaining("Delete without primary key or partition key is not supported"); } + @Test @Override public void testExplainAnalyzeWithDeleteWithSubquery() { 
@@ -1332,6 +1341,7 @@ public void testExplainAnalyzeWithDeleteWithSubquery() .hasStackTraceContaining("Delete without primary key or partition key is not supported"); } + @Test @Override public void testDeleteWithVarcharPredicate() { @@ -1339,6 +1349,7 @@ public void testDeleteWithVarcharPredicate() .hasStackTraceContaining("Delete without primary key or partition key is not supported"); } + @Test @Override public void testDeleteAllDataFromTable() { @@ -1346,6 +1357,7 @@ public void testDeleteAllDataFromTable() .hasStackTraceContaining("Deleting without partition key is not supported"); } + @Test @Override public void testRowLevelDelete() { diff --git a/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java b/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java index fe7d59daeda6..3a9f9b6ac94a 100644 --- a/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java +++ b/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java @@ -22,8 +22,8 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; -import org.testng.SkipException; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.sql.Connection; import java.sql.DriverManager; @@ -49,6 +49,7 @@ import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; @@ -105,13 +106,15 @@ public void testSampleBySqlInjection() assertUpdate("ALTER TABLE test SET PROPERTIES sample_by = 'p2'"); } + @Test @Override public void 
testRenameColumn() { // ClickHouse need resets all data in a column for specified column which to be renamed - throw new SkipException("TODO: test not implemented yet"); + abort("TODO: test not implemented yet"); } + @Test @Override public void testRenameColumnWithComment() { @@ -136,6 +139,7 @@ public void testAddColumnWithCommentSpecialCharacter(String comment) } } + @Test @Override public void testDropAndAddColumnWithSameName() { @@ -154,18 +158,11 @@ protected String createTableSqlForAddingAndDroppingColumn(String tableName, Stri return format("CREATE TABLE %s(%s varchar(50), value varchar(50) NOT NULL) WITH (engine = 'MergeTree', order_by = ARRAY['value'])", tableName, columnNameInSql); } + @Test + @Disabled @Override - public void testRenameColumnName(String columnName) + public void testRenameColumnName() { - // TODO: Enable this test - if (columnName.equals("a.dot")) { - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - .hasMessageContaining("Cannot rename column from nested struct to normal column"); - throw new SkipException("TODO"); - } - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - .hasMessageContaining("is not supported by storage Log"); - throw new SkipException("TODO"); } @Override @@ -178,6 +175,7 @@ protected Optional filterColumnNameTestData(String columnName) return Optional.of(columnName); } + @Test @Override public void testDropColumn() { @@ -215,6 +213,7 @@ protected String tableDefinitionForAddColumn() return "(x VARCHAR NOT NULL) WITH (engine = 'MergeTree', order_by = ARRAY['x'])"; } + @Test @Override // Overridden because the default storage type doesn't support adding columns public void testAddNotNullColumnToEmptyTable() { @@ -230,6 +229,7 @@ public void testAddNotNullColumnToEmptyTable() } } + @Test @Override // Overridden because (a) the default storage type doesn't support adding columns and (b) ClickHouse has implicit default value for new NON NULL column public void testAddNotNullColumn() { @@ 
-264,18 +264,20 @@ public void testAddColumnWithComment() } } + @Test @Override public void testAlterTableAddLongColumnName() { // TODO: Find the maximum column name length in ClickHouse and enable this test. - throw new SkipException("TODO"); + abort("TODO"); } + @Test @Override public void testAlterTableRenameColumnToLongName() { // TODO: Find the maximum column name length in ClickHouse and enable this test. - throw new SkipException("TODO"); + abort("TODO"); } @Test @@ -328,6 +330,7 @@ protected TestTable createTableWithDefaultColumns() "col_required2 Int64) ENGINE=Log"); } + @Test @Override public void testCharVarcharComparison() { @@ -337,7 +340,7 @@ public void testCharVarcharComparison() .hasMessageContaining("Expected rows"); // TODO run the test with clickhouse.map-string-as-varchar - throw new SkipException(""); + abort(""); } @Test @@ -602,6 +605,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp } // TODO: Remove override once decimal predicate pushdown is implemented (https://github.com/trinodb/trino/issues/7100) + @Test @Override public void testNumericAggregationPushdown() { @@ -684,12 +688,13 @@ protected String errorMessageForDateYearOfEraPredicate(String date) return "Date must be between 1970-01-01 and 2149-06-06 in ClickHouse: " + date; } + @Test @Override public void testCharTrailingSpace() { assertThatThrownBy(super::testCharTrailingSpace) .hasMessageStartingWith("Failed to execute statement: CREATE TABLE tpch.char_trailing_space"); - throw new SkipException("Implement test for ClickHouse"); + abort("Implement test for ClickHouse"); } @Override @@ -699,6 +704,7 @@ protected TestTable simpleTable() return new TestTable(onRemoteDatabase(), "tpch.simple_table", "(col BIGINT) Engine=Log", ImmutableList.of("1", "2")); } + @Test @Override public void testCreateTableWithLongTableName() { @@ -718,6 +724,7 @@ public void testCreateTableWithLongTableName() assertTrue(getQueryRunner().tableExists(getSession(), validTableName)); } + @Test 
@Override public void testRenameSchemaToLongName() { @@ -755,6 +762,7 @@ protected void verifySchemaNameLengthFailurePermissible(Throwable e) assertThat(e).hasMessageContaining("File name too long"); } + @Test @Override public void testRenameTableToLongTableName() { diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConnectorTest.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConnectorTest.java index d07862db4413..c11a54b0fec6 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConnectorTest.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConnectorTest.java @@ -27,7 +27,6 @@ import io.trino.sql.planner.plan.TableFinishNode; import io.trino.sql.planner.plan.TableWriterNode; import io.trino.testing.BaseConnectorTest; -import io.trino.testing.DataProviders; import io.trino.testing.DistributedQueryRunner; import io.trino.testing.MaterializedResult; import io.trino.testing.MaterializedResultWithQueryId; @@ -39,14 +38,10 @@ import io.trino.testing.sql.TestTable; import io.trino.testing.sql.TrinoSqlExecutor; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.nio.file.Path; import java.time.ZonedDateTime; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Optional; @@ -68,9 +63,6 @@ import static io.trino.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME; import static io.trino.spi.type.VarcharType.VARCHAR; import static io.trino.sql.planner.optimizations.PlanNodeSearcher.searchFrom; -import static io.trino.testing.DataProviders.cartesianProduct; -import static io.trino.testing.DataProviders.toDataProvider; -import static io.trino.testing.DataProviders.trueFalse; import static 
io.trino.testing.MaterializedResult.resultBuilder; import static io.trino.testing.QueryAssertions.copyTpchTables; import static io.trino.testing.TestingAccessControlManager.TestingPrivilegeType.EXECUTE_FUNCTION; @@ -86,6 +78,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; @@ -144,12 +137,6 @@ protected QueryRunner createQueryRunner() return queryRunner; } - @AfterClass(alwaysRun = true) - public void tearDown() - { - minioClient = null; // closed by closeAfterClass - } - @Override protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) { @@ -239,7 +226,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Delta Lake does not support columns with a default value"); + return abort("Delta Lake does not support columns with a default value"); } @Override @@ -362,6 +349,7 @@ public void testCreateTableAsSelectWithUnsupportedPartitionType() "Using array, map or row type on partitioned columns is unsupported"); } + @Test @Override public void testShowCreateSchema() { @@ -373,6 +361,7 @@ public void testShowCreateSchema() ")", getSession().getCatalog().orElseThrow(), schemaName, bucketName)); } + @Test @Override public void testDropNonEmptySchemaWithTable() { @@ -388,6 +377,7 @@ public void testDropNonEmptySchemaWithTable() assertUpdate("DROP SCHEMA " + schemaName); } + @Test @Override public void testDropColumn() { @@ -397,15 +387,19 @@ public void testDropColumn() .hasMessageContaining("Cannot drop column from table using column mapping mode NONE"); } + @Test @Override - public void testAddAndDropColumnName(String columnName) + 
public void testAddAndDropColumnName() { - // Override because the connector doesn't support dropping columns with 'none' column mapping - // There are some tests in in io.trino.tests.product.deltalake.TestDeltaLakeColumnMappingMode - assertThatThrownBy(() -> super.testAddAndDropColumnName(columnName)) - .hasMessageContaining("Cannot drop column from table using column mapping mode NONE"); + for (String columnName : testColumnNameDataProvider()) { + // Override because the connector doesn't support dropping columns with 'none' column mapping + // There are some tests in io.trino.tests.product.deltalake.TestDeltaLakeColumnMappingMode + assertThatThrownBy(() -> testAddAndDropColumnName(columnName, requiresDelimiting(columnName))) + .hasMessageContaining("Cannot drop column from table using column mapping mode NONE"); + } } + @Test @Override public void testDropAndAddColumnWithSameName() { @@ -415,13 +409,15 @@ public void testDropAndAddColumnWithSameName() .hasMessageContaining("Cannot drop column from table using column mapping mode NONE"); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testDropPartitionColumn(ColumnMappingMode mode) + @Test + public void testDropPartitionColumn() { - if (mode == ColumnMappingMode.NONE) { - throw new SkipException("Tested in testDropColumn"); - } + testDropPartitionColumn(ColumnMappingMode.ID); + testDropPartitionColumn(ColumnMappingMode.NAME); + } + public void testDropPartitionColumn(ColumnMappingMode mode) + { String tableName = "test_drop_partition_column_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + "(data int, part int) WITH (partitioned_by = ARRAY['part'], column_mapping_mode = '" + mode + "')"); @@ -441,6 +437,7 @@ public void testDropLastNonPartitionColumn() assertUpdate("DROP TABLE " + tableName); } + @Test @Override public void testRenameColumn() { @@ -450,6 +447,7 @@ public void testRenameColumn() .hasMessageContaining("Cannot rename column in table using column mapping 
mode NONE"); } + @Test @Override public void testRenameColumnWithComment() { @@ -459,13 +457,15 @@ .hasMessageContaining("Cannot rename column in table using column mapping mode NONE"); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testDeltaRenameColumnWithComment(ColumnMappingMode mode) + @Test + public void testDeltaRenameColumnWithComment() { - if (mode == ColumnMappingMode.NONE) { - throw new SkipException("The connector doesn't support renaming columns with 'none' column mapping"); - } + testDeltaRenameColumnWithComment(ColumnMappingMode.ID); + testDeltaRenameColumnWithComment(ColumnMappingMode.NAME); + } + private void testDeltaRenameColumnWithComment(ColumnMappingMode mode) + { String tableName = "test_rename_column_" + randomNameSuffix(); assertUpdate("" + "CREATE TABLE " + tableName + @@ -484,6 +484,7 @@ public void testDeltaRenameColumnWithComment(ColumnMappingMode mode) assertUpdate("DROP TABLE " + tableName); } + @Test @Override public void testAlterTableRenameColumnToLongName() { @@ -493,15 +494,19 @@ .hasMessageContaining("Cannot rename column in table using column mapping mode NONE"); } + @Test @Override - public void testRenameColumnName(String columnName) + public void testRenameColumnName() { - // Override because the connector doesn't support renaming columns with 'none' column mapping - // There are some tests in in io.trino.tests.product.deltalake.TestDeltaLakeColumnMappingMode - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - .hasMessageContaining("Cannot rename column in table using column mapping mode NONE"); + for (String columnName : testColumnNameDataProvider()) { + // Override because the connector doesn't support renaming columns with 'none' column mapping + // There are some tests in io.trino.tests.product.deltalake.TestDeltaLakeColumnMappingMode + assertThatThrownBy(() -> 
testRenameColumnName(columnName, requiresDelimiting(columnName))) + .hasMessageContaining("Cannot rename column in table using column mapping mode NONE"); + } } + @Test @Override public void testCharVarcharComparison() { @@ -510,8 +515,18 @@ public void testCharVarcharComparison() .hasStackTraceContaining("Unsupported type: char(3)"); } - @Test(dataProvider = "timestampValues") - public void testTimestampPredicatePushdown(String value) + @Test + public void testTimestampPredicatePushdown() + { + testTimestampPredicatePushdown("1965-10-31 01:00:08.123 UTC"); + testTimestampPredicatePushdown("1965-10-31 01:00:08.999 UTC"); + testTimestampPredicatePushdown("1970-01-01 01:13:42.000 America/Bahia_Banderas"); // There is a gap in JVM zone + testTimestampPredicatePushdown("1970-01-01 00:00:00.000 Asia/Kathmandu"); + testTimestampPredicatePushdown("2018-10-28 01:33:17.456 Europe/Vilnius"); + testTimestampPredicatePushdown("9999-12-31 23:59:59.999 UTC"); + } + + private void testTimestampPredicatePushdown(String value) { String tableName = "test_parquet_timestamp_predicate_pushdown_" + randomNameSuffix(); @@ -617,18 +632,6 @@ public void testTimestampWithTimeZonePartition() assertUpdate("DROP TABLE " + tableName); } - @DataProvider - public Object[][] timestampValues() - { - return new Object[][] { - {"1965-10-31 01:00:08.123 UTC"}, - {"1965-10-31 01:00:08.999 UTC"}, - {"1970-01-01 01:13:42.000 America/Bahia_Banderas"}, // There is a gap in JVM zone - {"1970-01-01 00:00:00.000 Asia/Kathmandu"}, - {"2018-10-28 01:33:17.456 Europe/Vilnius"}, - {"9999-12-31 23:59:59.999 UTC"}}; - } - @Test public void testAddColumnToPartitionedTable() { @@ -838,8 +841,15 @@ public void testMergeSimpleSelectPartitioned() assertUpdate("DROP TABLE " + targetTable); } - @Test(dataProvider = "partitionedProvider") - public void testMergeUpdateWithVariousLayouts(String partitionPhase) + @Test + public void testMergeUpdateWithVariousLayouts() + { + testMergeUpdateWithVariousLayouts(""); + 
testMergeUpdateWithVariousLayouts(", partitioned_by = ARRAY['customer']"); + testMergeUpdateWithVariousLayouts(", partitioned_by = ARRAY['purchase']"); + } + + private void testMergeUpdateWithVariousLayouts(String partitionPhase) { String targetTable = "merge_formats_target_" + randomNameSuffix(); String sourceTable = "merge_formats_source_" + randomNameSuffix(); @@ -864,18 +874,16 @@ public void testMergeUpdateWithVariousLayouts(String partitionPhase) assertUpdate("DROP TABLE " + targetTable); } - @DataProvider - public Object[][] partitionedProvider() + @Test + @Override + public void testMergeMultipleOperations() { - return new Object[][] { - {""}, - {", partitioned_by = ARRAY['customer']"}, - {", partitioned_by = ARRAY['purchase']"} - }; + testMergeMultipleOperations(""); + testMergeMultipleOperations(", partitioned_by = ARRAY['customer']"); + testMergeMultipleOperations(", partitioned_by = ARRAY['purchase']"); } - @Test(dataProvider = "partitionedProvider") - public void testMergeMultipleOperations(String partitioning) + private void testMergeMultipleOperations(String partitioning) { int targetCustomerCount = 32; String targetTable = "merge_multiple_" + randomNameSuffix(); @@ -959,8 +967,18 @@ public void testMergeSimpleQueryPartitioned() assertUpdate("DROP TABLE " + targetTable); } - @Test(dataProvider = "targetWithDifferentPartitioning") - public void testMergeMultipleRowsMatchFails(String createTableSql) + @Test + @Override + public void testMergeMultipleRowsMatchFails() + { + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])"); + 
testMergeMultipleRowsMatchFails("CREATE TABLE %s (purchases INT, customer VARCHAR, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])"); + } + + private void testMergeMultipleRowsMatchFails(String createTableSql) { String targetTable = "merge_multiple_target_" + randomNameSuffix(); String sourceTable = "merge_multiple_source_" + randomNameSuffix(); @@ -984,20 +1002,40 @@ public void testMergeMultipleRowsMatchFails(String createTableSql) assertUpdate("DROP TABLE " + targetTable); } - @DataProvider - public Object[][] targetWithDifferentPartitioning() - { - return new Object[][] { - {"CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')"}, - {"CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])"}, - {"CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])"}, - {"CREATE TABLE %s (purchases INT, customer VARCHAR, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])"}, - {"CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])"} - }; - } - - @Test(dataProvider = "targetAndSourceWithDifferentPartitioning") - public void testMergeWithDifferentPartitioning(String testDescription, String createTargetTableSql, String createSourceTableSql) + @Test + public void testMergeWithDifferentPartitioning() + { + testMergeWithDifferentPartitioning( + "target_partitioned_source_and_target_partitioned", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = 
ARRAY['address', 'customer'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])"); + testMergeWithDifferentPartitioning( + "target_partitioned_source_and_target_partitioned", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer', 'address'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])"); + testMergeWithDifferentPartitioning( + "target_flat_source_partitioned_by_customer", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')", + "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])"); + testMergeWithDifferentPartitioning( + "target_partitioned_by_customer_source_flat", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')"); + testMergeWithDifferentPartitioning( + "target_bucketed_by_customer_source_flat", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer', 'address'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')"); + testMergeWithDifferentPartitioning( + "target_partitioned_source_partitioned", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])"); + testMergeWithDifferentPartitioning( + "target_partitioned_target_partitioned", + "CREATE 
TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])"); + } + + private void testMergeWithDifferentPartitioning(String testDescription, String createTargetTableSql, String createSourceTableSql) { String targetTable = format("%s_target_%s", testDescription, randomNameSuffix()); String sourceTable = format("%s_source_%s", testDescription, randomNameSuffix()); @@ -1022,50 +1060,15 @@ public void testMergeWithDifferentPartitioning(String testDescription, String cr assertUpdate("DROP TABLE " + targetTable); } - @DataProvider - public Object[][] targetAndSourceWithDifferentPartitioning() - { - return new Object[][] { - { - "target_partitioned_source_and_target_partitioned", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - }, - { - "target_partitioned_source_and_target_partitioned", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer', 'address'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - }, - { - "target_flat_source_partitioned_by_customer", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')", - "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])" - }, - { - "target_partitioned_by_customer_source_flat", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by 
= ARRAY['address'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')", - }, - { - "target_bucketed_by_customer_source_flat", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer', 'address'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')", - }, - { - "target_partitioned_source_partitioned", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - }, - { - "target_partitioned_target_partitioned", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])", - } - }; + @Test + public void testTableWithNonNullableColumns() + { + testTableWithNonNullableColumns(ColumnMappingMode.ID); + testTableWithNonNullableColumns(ColumnMappingMode.NAME); + testTableWithNonNullableColumns(ColumnMappingMode.NONE); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testTableWithNonNullableColumns(ColumnMappingMode mode) + private void testTableWithNonNullableColumns(ColumnMappingMode mode) { String tableName = "test_table_with_non_nullable_columns_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + "(col1 INTEGER NOT NULL, col2 INTEGER, col3 INTEGER) WITH (column_mapping_mode='" + mode + "')"); @@ -1084,76 +1087,80 @@ public void testTableWithNonNullableColumns(ColumnMappingMode mode) assertQuery("SELECT * FROM " + tableName, "VALUES(1, 10, 100), (2, 20, 200)"); } - @Test(dataProvider = "changeDataFeedColumnNamesDataProvider") - 
public void testCreateTableWithChangeDataFeedColumnName(String columnName) + @Test + public void testCreateTableWithChangeDataFeedColumnName() { - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_create_table_cdf", "(" + columnName + " int)")) { - assertTableColumnNames(table.getName(), columnName); - } + for (String columnName : CHANGE_DATA_FEED_COLUMN_NAMES) { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_create_table_cdf", "(" + columnName + " int)")) { + assertTableColumnNames(table.getName(), columnName); + } - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_create_table_cdf", "AS SELECT 1 AS " + columnName)) { - assertTableColumnNames(table.getName(), columnName); + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_create_table_cdf", "AS SELECT 1 AS " + columnName)) { + assertTableColumnNames(table.getName(), columnName); + } } } - @Test(dataProvider = "changeDataFeedColumnNamesDataProvider") - public void testUnsupportedCreateTableWithChangeDataFeed(String columnName) + @Test + public void testUnsupportedCreateTableWithChangeDataFeed() { - String tableName = "test_unsupported_create_table_cdf" + randomNameSuffix(); + for (String columnName : CHANGE_DATA_FEED_COLUMN_NAMES) { + String tableName = "test_unsupported_create_table_cdf" + randomNameSuffix(); - assertQueryFails( - "CREATE TABLE " + tableName + "(" + columnName + " int) WITH (change_data_feed_enabled = true)", - "\\QUnable to use [%s] when change data feed is enabled\\E".formatted(columnName)); - assertFalse(getQueryRunner().tableExists(getSession(), tableName)); - - assertQueryFails( - "CREATE TABLE " + tableName + " WITH (change_data_feed_enabled = true) AS SELECT 1 AS " + columnName, - "\\QUnable to use [%s] when change data feed is enabled\\E".formatted(columnName)); - assertFalse(getQueryRunner().tableExists(getSession(), tableName)); - } - - @Test(dataProvider = "changeDataFeedColumnNamesDataProvider") - 
public void testUnsupportedAddColumnWithChangeDataFeed(String columnName) - { - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_add_column", "(col int) WITH (change_data_feed_enabled = true)")) { assertQueryFails( - "ALTER TABLE " + table.getName() + " ADD COLUMN " + columnName + " int", - "\\QColumn name %s is forbidden when change data feed is enabled\\E".formatted(columnName)); - assertTableColumnNames(table.getName(), "col"); + "CREATE TABLE " + tableName + "(" + columnName + " int) WITH (change_data_feed_enabled = true)", + "\\QUnable to use [%s] when change data feed is enabled\\E".formatted(columnName)); + assertFalse(getQueryRunner().tableExists(getSession(), tableName)); - assertUpdate("ALTER TABLE " + table.getName() + " SET PROPERTIES change_data_feed_enabled = false"); - assertUpdate("ALTER TABLE " + table.getName() + " ADD COLUMN " + columnName + " int"); - assertTableColumnNames(table.getName(), "col", columnName); + assertQueryFails( + "CREATE TABLE " + tableName + " WITH (change_data_feed_enabled = true) AS SELECT 1 AS " + columnName, + "\\QUnable to use [%s] when change data feed is enabled\\E".formatted(columnName)); + assertFalse(getQueryRunner().tableExists(getSession(), tableName)); } } - @Test(dataProvider = "changeDataFeedColumnNamesDataProvider") - public void testUnsupportedRenameColumnWithChangeDataFeed(String columnName) - { - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_rename_column", "(col int) WITH (change_data_feed_enabled = true)")) { - assertQueryFails( - "ALTER TABLE " + table.getName() + " RENAME COLUMN col TO " + columnName, - "Cannot rename column when change data feed is enabled"); - assertTableColumnNames(table.getName(), "col"); + @Test + public void testUnsupportedAddColumnWithChangeDataFeed() + { + for (String columnName : CHANGE_DATA_FEED_COLUMN_NAMES) { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_add_column", "(col int) WITH 
(change_data_feed_enabled = true)")) { + assertQueryFails( + "ALTER TABLE " + table.getName() + " ADD COLUMN " + columnName + " int", + "\\QColumn name %s is forbidden when change data feed is enabled\\E".formatted(columnName)); + assertTableColumnNames(table.getName(), "col"); + + assertUpdate("ALTER TABLE " + table.getName() + " SET PROPERTIES change_data_feed_enabled = false"); + assertUpdate("ALTER TABLE " + table.getName() + " ADD COLUMN " + columnName + " int"); + assertTableColumnNames(table.getName(), "col", columnName); + } } } - @Test(dataProvider = "changeDataFeedColumnNamesDataProvider") - public void testUnsupportedSetTablePropertyWithChangeDataFeed(String columnName) - { - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_set_properties", "(" + columnName + " int)")) { - assertQueryFails( - "ALTER TABLE " + table.getName() + " SET PROPERTIES change_data_feed_enabled = true", - "\\QUnable to enable change data feed because table contains [%s] columns\\E".formatted(columnName)); - assertThat((String) computeScalar("SHOW CREATE TABLE " + table.getName())) - .doesNotContain("change_data_feed_enabled = true"); + @Test + public void testUnsupportedRenameColumnWithChangeDataFeed() + { + for (String columnName : CHANGE_DATA_FEED_COLUMN_NAMES) { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_rename_column", "(col int) WITH (change_data_feed_enabled = true)")) { + assertQueryFails( + "ALTER TABLE " + table.getName() + " RENAME COLUMN col TO " + columnName, + "Cannot rename column when change data feed is enabled"); + assertTableColumnNames(table.getName(), "col"); + } } } - @DataProvider - public Object[][] changeDataFeedColumnNamesDataProvider() - { - return CHANGE_DATA_FEED_COLUMN_NAMES.stream().collect(toDataProvider()); + @Test + public void testUnsupportedSetTablePropertyWithChangeDataFeed() + { + for (String columnName : CHANGE_DATA_FEED_COLUMN_NAMES) { + try (TestTable table = new 
TestTable(getQueryRunner()::execute, "test_set_properties", "(" + columnName + " int)")) { + assertQueryFails( + "ALTER TABLE " + table.getName() + " SET PROPERTIES change_data_feed_enabled = true", + "\\QUnable to enable change data feed because table contains [%s] columns\\E".formatted(columnName)); + assertThat((String) computeScalar("SHOW CREATE TABLE " + table.getName())) + .doesNotContain("change_data_feed_enabled = true"); + } + } } @Test @@ -1167,7 +1174,14 @@ public void testThatEnableCdfTablePropertyIsShownForCtasTables() .contains("change_data_feed_enabled = true"); } - @Test(dataProvider = "columnMappingModeDataProvider") + @Test + public void testCreateTableWithColumnMappingMode() + { + testCreateTableWithColumnMappingMode(ColumnMappingMode.ID); + testCreateTableWithColumnMappingMode(ColumnMappingMode.NAME); + testCreateTableWithColumnMappingMode(ColumnMappingMode.NONE); + } + public void testCreateTableWithColumnMappingMode(ColumnMappingMode mode) { testCreateTableColumnMappingMode(mode, tableName -> { @@ -1176,16 +1190,30 @@ public void testCreateTableWithColumnMappingMode(ColumnMappingMode mode) }); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testCreateTableAsSelectWithColumnMappingMode(ColumnMappingMode mode) + @Test + public void testCreateTableAsSelectWithColumnMappingMode() + { + testCreateTableAsSelectWithColumnMappingMode(ColumnMappingMode.ID); + testCreateTableAsSelectWithColumnMappingMode(ColumnMappingMode.NAME); + testCreateTableAsSelectWithColumnMappingMode(ColumnMappingMode.NONE); + } + + private void testCreateTableAsSelectWithColumnMappingMode(ColumnMappingMode mode) { testCreateTableColumnMappingMode(mode, tableName -> assertUpdate("CREATE TABLE " + tableName + " WITH (column_mapping_mode='" + mode + "')" + " AS SELECT 1 AS a_int, CAST(row(11) AS row(x integer)) AS a_row", 1)); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void 
testCreatePartitionTableAsSelectWithColumnMappingMode(ColumnMappingMode mode) + @Test + public void testCreatePartitionTableAsSelectWithColumnMappingMode() + { + testCreatePartitionTableAsSelectWithColumnMappingMode(ColumnMappingMode.ID); + testCreatePartitionTableAsSelectWithColumnMappingMode(ColumnMappingMode.NAME); + testCreatePartitionTableAsSelectWithColumnMappingMode(ColumnMappingMode.NONE); + } + + private void testCreatePartitionTableAsSelectWithColumnMappingMode(ColumnMappingMode mode) { testCreateTableColumnMappingMode(mode, tableName -> assertUpdate("CREATE TABLE " + tableName + " WITH (column_mapping_mode='" + mode + "', partitioned_by=ARRAY['a_int'])" + @@ -1211,13 +1239,15 @@ private void testCreateTableColumnMappingMode(ColumnMappingMode mode, Consumer mode != ColumnMappingMode.UNKNOWN) - .collect(toDataProvider()); - } - @Test public void testCreateTableUnsupportedColumnMappingMode() { @@ -1882,8 +1954,15 @@ public void testProjectionPushdownNonPrimitiveTypeExplain() "_row#child := _row#child:bigint:REGULAR"); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadCdfChanges(ColumnMappingMode mode) + @Test + public void testReadCdfChanges() + { + testReadCdfChanges(ColumnMappingMode.ID); + testReadCdfChanges(ColumnMappingMode.NAME); + testReadCdfChanges(ColumnMappingMode.NONE); + } + + private void testReadCdfChanges(ColumnMappingMode mode) { String tableName = "test_basic_operations_on_table_with_cdf_enabled_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -1933,8 +2012,15 @@ public void testReadCdfChanges(ColumnMappingMode mode) """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadCdfChangesOnPartitionedTable(ColumnMappingMode mode) + @Test + public void testReadCdfChangesOnPartitionedTable() + { + testReadCdfChangesOnPartitionedTable(ColumnMappingMode.ID); + 
testReadCdfChangesOnPartitionedTable(ColumnMappingMode.NAME); + testReadCdfChangesOnPartitionedTable(ColumnMappingMode.NONE); + } + + private void testReadCdfChangesOnPartitionedTable(ColumnMappingMode mode) { String tableName = "test_basic_operations_on_table_with_cdf_enabled_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2024,8 +2110,15 @@ private void testCdfWithMappingModeOnTableWithColumnDropped(ColumnMappingMode mo """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadMergeChanges(ColumnMappingMode mode) + @Test + public void testReadMergeChanges() + { + testReadMergeChanges(ColumnMappingMode.ID); + testReadMergeChanges(ColumnMappingMode.NAME); + testReadMergeChanges(ColumnMappingMode.NONE); + } + + private void testReadMergeChanges(ColumnMappingMode mode) { String tableName1 = "test_basic_operations_on_table_with_cdf_enabled_merge_into_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName1 + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2070,8 +2163,15 @@ public void testReadMergeChanges(ColumnMappingMode mode) """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadMergeChangesOnPartitionedTable(ColumnMappingMode mode) + @Test + public void testReadMergeChangesOnPartitionedTable() + { + testReadMergeChangesOnPartitionedTable(ColumnMappingMode.ID); + testReadMergeChangesOnPartitionedTable(ColumnMappingMode.NAME); + testReadMergeChangesOnPartitionedTable(ColumnMappingMode.NONE); + } + + private void testReadMergeChangesOnPartitionedTable(ColumnMappingMode mode) { String targetTable = "test_basic_operations_on_partitioned_table_with_cdf_enabled_target_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + targetTable + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2148,8 +2248,15 @@ public void testReadMergeChangesOnPartitionedTable(ColumnMappingMode mode) """); } - 
@Test(dataProvider = "columnMappingModeDataProvider") - public void testCdfCommitTimestamp(ColumnMappingMode mode) + @Test + public void testCdfCommitTimestamp() + { + testCdfCommitTimestamp(ColumnMappingMode.ID); + testCdfCommitTimestamp(ColumnMappingMode.NAME); + testCdfCommitTimestamp(ColumnMappingMode.NONE); + } + + private void testCdfCommitTimestamp(ColumnMappingMode mode) { String tableName = "test_cdf_commit_timestamp_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2160,8 +2267,15 @@ public void testCdfCommitTimestamp(ColumnMappingMode mode) assertThat(historyCommitTimestamp).isEqualTo(tableChangesCommitTimestamp); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadDifferentChangeRanges(ColumnMappingMode mode) + @Test + public void testReadDifferentChangeRanges() + { + testReadDifferentChangeRanges(ColumnMappingMode.ID); + testReadDifferentChangeRanges(ColumnMappingMode.NAME); + testReadDifferentChangeRanges(ColumnMappingMode.NONE); + } + + private void testReadDifferentChangeRanges(ColumnMappingMode mode) { String tableName = "test_reading_ranges_of_changes_on_table_with_cdf_enabled_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2226,8 +2340,15 @@ public void testReadDifferentChangeRanges(ColumnMappingMode mode) assertQueryFails("SELECT * FROM TABLE(system.table_changes('test_schema', '" + tableName + "', 10))", "since_version: 10 is higher then current table version: 6"); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadChangesOnTableWithColumnAdded(ColumnMappingMode mode) + @Test + public void testReadChangesOnTableWithColumnAdded() + { + testReadChangesOnTableWithColumnAdded(ColumnMappingMode.ID); + testReadChangesOnTableWithColumnAdded(ColumnMappingMode.NAME); + testReadChangesOnTableWithColumnAdded(ColumnMappingMode.NONE); + } + + 
private void testReadChangesOnTableWithColumnAdded(ColumnMappingMode mode) { String tableName = "test_reading_changes_on_table_with_columns_added_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2244,8 +2365,15 @@ public void testReadChangesOnTableWithColumnAdded(ColumnMappingMode mode) """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testReadChangesOnTableWithRowColumn(ColumnMappingMode mode) + @Test + public void testReadChangesOnTableWithRowColumn() + { + testReadChangesOnTableWithRowColumn(ColumnMappingMode.ID); + testReadChangesOnTableWithRowColumn(ColumnMappingMode.NAME); + testReadChangesOnTableWithRowColumn(ColumnMappingMode.NONE); + } + + private void testReadChangesOnTableWithRowColumn(ColumnMappingMode mode) { String tableName = "test_reading_changes_on_table_with_columns_added_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, costs ROW(month VARCHAR, amount BIGINT)) " + @@ -2273,8 +2401,15 @@ public void testReadChangesOnTableWithRowColumn(ColumnMappingMode mode) """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testCdfOnTableWhichDoesntHaveItEnabledInitially(ColumnMappingMode mode) + @Test + public void testCdfOnTableWhichDoesntHaveItEnabledInitially() + { + testCdfOnTableWhichDoesntHaveItEnabledInitially(ColumnMappingMode.ID); + testCdfOnTableWhichDoesntHaveItEnabledInitially(ColumnMappingMode.NAME); + testCdfOnTableWhichDoesntHaveItEnabledInitially(ColumnMappingMode.NONE); + } + + private void testCdfOnTableWhichDoesntHaveItEnabledInitially(ColumnMappingMode mode) { String tableName = "test_cdf_on_table_without_it_initially_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2315,8 +2450,15 @@ public void testCdfOnTableWhichDoesntHaveItEnabledInitially(ColumnMappingMode mo """); } - @Test(dataProvider = 
"columnMappingModeDataProvider") - public void testReadChangesFromCtasTable(ColumnMappingMode mode) + @Test + public void testReadChangesFromCtasTable() + { + testReadChangesFromCtasTable(ColumnMappingMode.ID); + testReadChangesFromCtasTable(ColumnMappingMode.NAME); + testReadChangesFromCtasTable(ColumnMappingMode.NONE); + } + + private void testReadChangesFromCtasTable(ColumnMappingMode mode) { String tableName = "test_basic_operations_on_table_with_cdf_enabled_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " WITH (change_data_feed_enabled = true, column_mapping_mode = '" + mode + "') " + @@ -2333,8 +2475,16 @@ public void testReadChangesFromCtasTable(ColumnMappingMode mode) """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testVacuumDeletesCdfFiles(ColumnMappingMode mode) + @Test + public void testVacuumDeletesCdfFiles() + throws InterruptedException + { + testVacuumDeletesCdfFiles(ColumnMappingMode.ID); + testVacuumDeletesCdfFiles(ColumnMappingMode.NAME); + testVacuumDeletesCdfFiles(ColumnMappingMode.NONE); + } + + private void testVacuumDeletesCdfFiles(ColumnMappingMode mode) throws InterruptedException { String tableName = "test_vacuum_correctly_deletes_cdf_files_" + randomNameSuffix(); @@ -2364,8 +2514,15 @@ public void testVacuumDeletesCdfFiles(ColumnMappingMode mode) """); } - @Test(dataProvider = "columnMappingModeDataProvider") - public void testCdfWithOptimize(ColumnMappingMode mode) + @Test + public void testCdfWithOptimize() + { + testCdfWithOptimize(ColumnMappingMode.ID); + testCdfWithOptimize(ColumnMappingMode.NAME); + testCdfWithOptimize(ColumnMappingMode.NONE); + } + + private void testCdfWithOptimize(ColumnMappingMode mode) { String tableName = "test_cdf_with_optimize_" + randomNameSuffix(); assertUpdate("CREATE TABLE " + tableName + " (page_url VARCHAR, domain VARCHAR, views INTEGER) " + @@ -2410,7 +2567,13 @@ public void testTableChangesAccessControl() assertUpdate("DROP TABLE " + tableName); } - 
@Test(dataProviderClass = DataProviders.class, dataProvider = "trueFalse") + @Test + public void testTableWithTrailingSlashLocation() + { + testTableWithTrailingSlashLocation(true); + testTableWithTrailingSlashLocation(false); + } + public void testTableWithTrailingSlashLocation(boolean partitioned) { String tableName = "test_table_with_trailing_slash_location_" + randomNameSuffix(); @@ -2435,8 +2598,56 @@ public void testTableWithTrailingSlashLocation(boolean partitioned) assertUpdate("DROP TABLE " + tableName); } - @Test(dataProvider = "deleteFiltersForTable") - public void testDeleteWithFilter(String createTableSql, String deleteFilter, boolean pushDownDelete) + @Test + public void testDeleteWithFilter() + { + testDeleteWithFilter( + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')", + "address = 'Antioch'", + false); + testDeleteWithFilter( + // delete filter applied on function over non-partitioned field + "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", + "starts_with(address, 'Antioch')", + false); + testDeleteWithFilter( + // delete filter applied on partitioned field + "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", + "address = 'Antioch'", + true); + testDeleteWithFilter( + // delete filter applied on partitioned field and on synthesized field + "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", + "address = 'Antioch' AND \"$file_size\" > 0", + false); + testDeleteWithFilter( + // delete filter applied on function over partitioned field + "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", + "starts_with(address, 'Antioch')", + false); + testDeleteWithFilter( + // 
delete filter applied on non-partitioned field + "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])", + "address = 'Antioch'", + false); + testDeleteWithFilter( + // delete filter fully applied on composed partition + "CREATE TABLE %s (purchases INT, customer VARCHAR, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])", + "address = 'Antioch' AND (customer = 'Aaron' OR customer = 'Bill')", + true); + testDeleteWithFilter( + // delete filter applied only partly on first partitioned field + "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])", + "address = 'Antioch'", + true); + testDeleteWithFilter( + // delete filter applied only partly on second partitioned field + "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer', 'address'])", + "address = 'Antioch'", + true); + } + + private void testDeleteWithFilter(String createTableSql, String deleteFilter, boolean pushDownDelete) { String table = "delete_with_filter_" + randomNameSuffix(); assertUpdate(format(createTableSql, table, bucketName, table)); @@ -2463,66 +2674,6 @@ public void testDeleteWithFilter(String createTableSql, String deleteFilter, boo assertUpdate("DROP TABLE " + table); } - @DataProvider - public Object[][] deleteFiltersForTable() - { - return new Object[][]{ - { - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (location = 's3://%s/%s')", - "address = 'Antioch'", - false - }, - { - // delete filter applied on function over non-partitioned field - "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - "starts_with(address, 'Antioch')", - false - }, - { - // delete filter applied on partitioned 
field - "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - "address = 'Antioch'", - true - }, - { - // delete filter applied on partitioned field and on synthesized field - "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - "address = 'Antioch' AND \"$file_size\" > 0", - false - }, - { - // delete filter applied on function over partitioned field - "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address'])", - "starts_with(address, 'Antioch')", - false - }, - { - // delete filter applied on non-partitioned field - "CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer'])", - "address = 'Antioch'", - false - }, - { - // delete filter fully applied on composed partition - "CREATE TABLE %s (purchases INT, customer VARCHAR, address VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])", - "address = 'Antioch' AND (customer = 'Aaron' OR customer = 'Bill')", - true - }, - { - // delete filter applied only partly on first partitioned field - "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['address', 'customer'])", - "address = 'Antioch'", - true - }, - { - // delete filter applied only partly on second partitioned field - "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (location = 's3://%s/%s', partitioned_by = ARRAY['customer', 'address'])", - "address = 'Antioch'", - true - }, - }; - } - @Override protected void verifyAddNotNullColumnToNonEmptyTableFailurePermissible(Throwable e) { diff --git a/plugin/trino-druid/src/test/java/io/trino/plugin/druid/TestDruidConnectorTest.java 
b/plugin/trino-druid/src/test/java/io/trino/plugin/druid/TestDruidConnectorTest.java index c4c2ae91868b..94fce7df9394 100644 --- a/plugin/trino-druid/src/test/java/io/trino/plugin/druid/TestDruidConnectorTest.java +++ b/plugin/trino-druid/src/test/java/io/trino/plugin/druid/TestDruidConnectorTest.java @@ -31,9 +31,9 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.SqlExecutor; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.sql.DatabaseMetaData; import java.sql.SQLException; @@ -56,8 +56,11 @@ import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertFalse; +@TestInstance(PER_CLASS) public class TestDruidConnectorTest extends BaseJdbcConnectorTest { @@ -75,7 +78,7 @@ protected QueryRunner createQueryRunner() ImmutableList.of(ORDERS, LINE_ITEM, NATION, REGION, PART, CUSTOMER)); } - @AfterClass(alwaysRun = true) + @AfterAll public void destroy() { druidServer = null; // closed by closeAfterClass @@ -126,7 +129,7 @@ protected MaterializedResult getDescribeOrdersResult() .build(); } - @org.junit.jupiter.api.Test + @Test @Override public void testShowColumns() { @@ -302,30 +305,33 @@ public void testLimitPushDown() @Override public void testInsertNegativeDate() { - throw new SkipException("Druid connector does not map 'orderdate' column to date type and INSERT statement"); + abort("Druid connector does not map 'orderdate' column to date type and INSERT statement"); } @Test @Override public void testDateYearOfEraPredicate() { - throw 
new SkipException("Druid connector does not map 'orderdate' column to date type"); + abort("Druid connector does not map 'orderdate' column to date type"); } + @Test @Override public void testCharTrailingSpace() { assertThatThrownBy(super::testCharTrailingSpace) .hasMessageContaining("Error while executing SQL \"CREATE TABLE druid.char_trailing_space"); - throw new SkipException("Implement test for Druid"); + abort("Implement test for Druid"); } + @Test @Override public void testNativeQuerySelectFromTestTable() { - throw new SkipException("cannot create test table for Druid"); + abort("cannot create test table for Druid"); } + @Test @Override public void testNativeQueryCreateStatement() { @@ -336,10 +342,11 @@ public void testNativeQueryCreateStatement() assertFalse(getQueryRunner().tableExists(getSession(), "numbers")); } + @Test @Override public void testNativeQueryInsertStatementTableExists() { - throw new SkipException("cannot create test table for Druid"); + abort("cannot create test table for Druid"); } @Test diff --git a/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/BaseElasticsearchConnectorTest.java b/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/BaseElasticsearchConnectorTest.java index 5b53be176e71..521c4e04fb8a 100644 --- a/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/BaseElasticsearchConnectorTest.java +++ b/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/BaseElasticsearchConnectorTest.java @@ -32,8 +32,10 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.intellij.lang.annotations.Language; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.io.IOException; import java.time.LocalDateTime; @@ -49,10 
+51,12 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; +@TestInstance(PER_CLASS) public abstract class BaseElasticsearchConnectorTest extends BaseConnectorTest { @@ -85,7 +89,7 @@ protected QueryRunner createQueryRunner() catalogName); } - @AfterClass(alwaysRun = true) + @AfterAll public final void destroy() throws IOException { @@ -195,6 +199,7 @@ public void testSortItemsReflectedInExplain() "TopNPartial\\[count = 5, orderBy = \\[nationkey DESC"); } + @Test @Override public void testShowCreateTable() { @@ -212,7 +217,7 @@ public void testShowCreateTable() ")"); } - @org.junit.jupiter.api.Test + @Test @Override public void testShowColumns() { @@ -1771,7 +1776,8 @@ public void testSelectInformationSchemaForMultiIndexAlias() testSelectInformationSchemaColumns(); } - @Test(enabled = false) // TODO (https://github.com/trinodb/trino/issues/2428) + @Test // TODO (https://github.com/trinodb/trino/issues/2428) + @Disabled public void testMultiIndexAlias() throws IOException { diff --git a/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/TestElasticsearch6ConnectorTest.java b/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/TestElasticsearch6ConnectorTest.java index c5a293bd8eb6..06b2c7499200 100644 --- a/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/TestElasticsearch6ConnectorTest.java +++ b/plugin/trino-elasticsearch/src/test/java/io/trino/plugin/elasticsearch/TestElasticsearch6ConnectorTest.java @@ -17,7 +17,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.intellij.lang.annotations.Language; -import 
org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java index 0612e6f01c9f..113f1d3cff9f 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java @@ -68,9 +68,7 @@ import io.trino.type.TypeDeserializer; import org.assertj.core.api.AbstractLongAssert; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; @@ -165,7 +163,6 @@ import static io.trino.sql.planner.planprinter.IoPlanPrinter.FormattedMarker.Bound.EXACTLY; import static io.trino.sql.planner.planprinter.PlanPrinter.textLogicalPlan; import static io.trino.sql.tree.ExplainType.Type.DISTRIBUTED; -import static io.trino.testing.DataProviders.toDataProvider; import static io.trino.testing.MaterializedResult.resultBuilder; import static io.trino.testing.QueryAssertions.assertEqualsIgnoreOrder; import static io.trino.testing.TestingAccessControlManager.TestingPrivilegeType.DELETE_TABLE; @@ -192,6 +189,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.data.Offset.offset; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; @@ -267,6 +265,7 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) }; } + @Test @Override public void verifySupportsUpdateDeclaration() { @@ -275,6 +274,7 @@ public void verifySupportsUpdateDeclaration() } } 
+ @Test @Override public void verifySupportsRowLevelUpdateDeclaration() { @@ -296,6 +296,7 @@ protected void verifySelectAfterInsertFailurePermissible(Throwable e) .containsPattern("io.trino.spi.TrinoException: Cannot read from a table tpch.test_insert_select_\\w+ that was modified within transaction, you need to commit the transaction first"); } + @Test @Override public void testDelete() { @@ -303,6 +304,7 @@ public void testDelete() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testDeleteWithLike() { @@ -310,6 +312,7 @@ public void testDeleteWithLike() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testDeleteWithComplexPredicate() { @@ -317,6 +320,7 @@ public void testDeleteWithComplexPredicate() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testDeleteWithSemiJoin() { @@ -324,6 +328,7 @@ public void testDeleteWithSemiJoin() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testDeleteWithSubquery() { @@ -331,6 +336,7 @@ public void testDeleteWithSubquery() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testUpdate() { @@ -338,6 +344,7 @@ public void testUpdate() .hasMessage(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testRowLevelUpdate() { @@ -345,6 +352,7 @@ public void testRowLevelUpdate() .hasMessage(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testUpdateRowConcurrently() throws Exception @@ -356,6 +364,7 @@ public void testUpdateRowConcurrently() .hasMessage(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testUpdateWithPredicates() { @@ -363,6 +372,7 @@ public void testUpdateWithPredicates() .hasMessage(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testUpdateRowType() { @@ 
-370,6 +380,7 @@ public void testUpdateRowType() .hasMessage(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testUpdateAllValues() { @@ -377,6 +388,7 @@ public void testUpdateAllValues() .hasMessage(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testExplainAnalyzeWithDeleteWithSubquery() { @@ -384,6 +396,7 @@ public void testExplainAnalyzeWithDeleteWithSubquery() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testDeleteWithVarcharPredicate() { @@ -391,6 +404,7 @@ public void testDeleteWithVarcharPredicate() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } + @Test @Override public void testRowLevelDelete() { @@ -398,8 +412,14 @@ public void testRowLevelDelete() .hasStackTraceContaining(MODIFYING_NON_TRANSACTIONAL_TABLE_MESSAGE); } - @Test(dataProvider = "queryPartitionFilterRequiredSchemasDataProvider") - public void testRequiredPartitionFilter(String queryPartitionFilterRequiredSchemas) + @Test + public void testRequiredPartitionFilter() + { + testRequiredPartitionFilter("[]"); + testRequiredPartitionFilter("[\"tpch\"]"); + } + + private void testRequiredPartitionFilter(String queryPartitionFilterRequiredSchemas) { Session session = Session.builder(getSession()) .setIdentity(Identity.forUser("hive") @@ -445,8 +465,14 @@ public void testRequiredPartitionFilter(String queryPartitionFilterRequiredSchem assertUpdate(session, "DROP TABLE test_required_partition_filter"); } - @Test(dataProvider = "queryPartitionFilterRequiredSchemasDataProvider") - public void testRequiredPartitionFilterInferred(String queryPartitionFilterRequiredSchemas) + @Test + public void testRequiredPartitionFilterInferred() + { + testRequiredPartitionFilterInferred("[]"); + testRequiredPartitionFilterInferred("[\"tpch\"]"); + } + + private void testRequiredPartitionFilterInferred(String queryPartitionFilterRequiredSchemas) { Session session = 
Session.builder(getSession()) .setIdentity(Identity.forUser("hive") @@ -478,15 +504,6 @@ public void testRequiredPartitionFilterInferred(String queryPartitionFilterRequi assertUpdate(session, "DROP TABLE test_partition_filter_inferred_right"); } - @DataProvider - public Object[][] queryPartitionFilterRequiredSchemasDataProvider() - { - return new Object[][] { - {"[]"}, - {"[\"tpch\"]"} - }; - } - @Test public void testRequiredPartitionFilterAppliedOnDifferentSchema() { @@ -732,6 +749,7 @@ public void testSchemaAuthorizationForRole() assertUpdate(admin, "DROP ROLE authorized_users IN hive"); } + @Test @Override public void testCreateSchemaWithNonLowercaseOwnerName() { @@ -2140,10 +2158,14 @@ public void testCreateTableUnsupportedPartitionTypeAs() .hasMessageMatching("Unsupported type .* for partition: a"); } - @Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Unsupported Hive type: varchar\\(65536\\)\\. Supported VARCHAR types: VARCHAR\\(<=65535\\), VARCHAR\\.") + @Test public void testCreateTableNonSupportedVarcharColumn() { - assertUpdate("CREATE TABLE test_create_table_non_supported_varchar_column (apple varchar(65536))"); + assertThatThrownBy(() -> { + assertUpdate("CREATE TABLE test_create_table_non_supported_varchar_column (apple varchar(65536))"); + }) + .isInstanceOf(RuntimeException.class) + .hasMessageMatching("Unsupported Hive type: varchar\\(65536\\)\\. 
Supported VARCHAR types: VARCHAR\\(<=65535\\), VARCHAR\\."); } @Test @@ -2259,8 +2281,100 @@ private void testBucketedTable(Session session, HiveStorageFormat storageFormat, assertFalse(getQueryRunner().tableExists(session, tableName)); } - @Test(dataProvider = "bucketFilteringDataTypesSetupProvider") - public void testFilterOnBucketedTable(BucketedFilterTestSetup testSetup) + @Test + public void testFilterOnBucketedTable() + { + testFilterOnBucketedValue( + "BOOLEAN", + Stream.concat(IntStream.range(0, 100).mapToObj(i -> "true"), IntStream.range(0, 100).mapToObj(i -> "false")) + .collect(toImmutableList()), + "true", + 100, + 100); + + testFilterOnBucketedValue( + "TINYINT", + IntStream.range(0, 127).mapToObj(String::valueOf).collect(toImmutableList()), + "126", + 26, + 1); + + testFilterOnBucketedValue( + "SMALLINT", + IntStream.range(0, 1000).map(i -> i + 22767).mapToObj(String::valueOf).collect(toImmutableList()), + "22767", + 200, + 1); + + testFilterOnBucketedValue( + "INTEGER", + IntStream.range(0, 1000).map(i -> i + 1274942432).mapToObj(String::valueOf).collect(toImmutableList()), + "1274942432", + 200, + 1); + + testFilterOnBucketedValue( + "BIGINT", + IntStream.range(0, 1000).mapToLong(i -> i + 312739231274942432L).mapToObj(String::valueOf).collect(toImmutableList()), + "312739231274942432", + 200, + 1); + + testFilterOnBucketedValue( + "REAL", + IntStream.range(0, 1000).mapToDouble(i -> i + 567.123).mapToObj(val -> "REAL '" + val + "'").collect(toImmutableList()), + "567.123", + 201, + 1); + + testFilterOnBucketedValue( + "DOUBLE", + IntStream.range(0, 1000).mapToDouble(i -> i + 1234567890123.123).mapToObj(val -> "DOUBLE '" + val + "'").collect(toImmutableList()), + "1234567890123.123", + 201, + 1); + + testFilterOnBucketedValue( + "VARCHAR", + IntStream.range(0, 1000).mapToObj(i -> "'test value " + i + "'").collect(toImmutableList()), + "'test value 5'", + 200, + 1); + + testFilterOnBucketedValue( + "VARCHAR(20)", + IntStream.range(0, 1000).mapToObj(i 
-> "'test value " + i + "'").collect(toImmutableList()), + "'test value 5'", + 200, + 1); + + testFilterOnBucketedValue( + "DATE", + IntStream.range(0, 1000).mapToObj(i -> "DATE '2020-02-12' + interval '" + i + "' day").collect(toImmutableList()), + "DATE '2020-02-15'", + 200, + 1); + + testFilterOnBucketedValue( + "ARRAY", + IntStream.range(0, 1000) + .mapToObj(i -> format("ARRAY[%s, %s, %s, %s]", i + 22767, i + 22768, i + 22769, i + 22770)) + .collect(toImmutableList()), + "ARRAY[22767, 22768, 22769, 22770]", + 200, + 1); + + testFilterOnBucketedValue( + "MAP", + IntStream.range(0, 1000) + .mapToObj(i -> format("MAP(ARRAY[%s, %s], ARRAY[%s, %s])", i + 567.123, i + 568.456, i + 22769, i + 22770)) + .collect(toImmutableList()), + "MAP(ARRAY[567.123, 568.456], ARRAY[22769, 22770])", + 149, + 1); + } + + private void testFilterOnBucketedValue(String typeName, List valueList, String filterValue, long expectedPhysicalInputRows, long expectedResult) { String tableName = "test_filter_on_bucketed_table_" + randomNameSuffix(); assertUpdate( @@ -2270,12 +2384,12 @@ public void testFilterOnBucketedTable(BucketedFilterTestSetup testSetup) format = 'TEXTFILE', bucketed_by = ARRAY[ 'bucket_key' ], bucket_count = 5) - """.formatted(tableName, testSetup.getTypeName())); + """.formatted(tableName, typeName)); - String values = testSetup.getValues().stream() + String values = valueList.stream() .map(value -> "(" + value + ", rand())") .collect(joining(", ")); - assertUpdate("INSERT INTO " + tableName + " VALUES " + values, testSetup.getValues().size()); + assertUpdate("INSERT INTO " + tableName + " VALUES " + values, valueList.size()); // It will only read data from a single bucket instead of all buckets, // so physicalInputPositions should be less than number of rows inserted (. 
@@ -2285,99 +2399,23 @@ public void testFilterOnBucketedTable(BucketedFilterTestSetup testSetup) SELECT count(*) FROM %s WHERE bucket_key = %s - """.formatted(tableName, testSetup.getFilterValue()), - queryStats -> assertThat(queryStats.getPhysicalInputPositions()).isEqualTo(testSetup.getExpectedPhysicalInputRows()), - result -> assertThat(result.getOnlyValue()).isEqualTo(testSetup.getExpectedResult())); + """.formatted(tableName, filterValue), + queryStats -> assertThat(queryStats.getPhysicalInputPositions()).isEqualTo(expectedPhysicalInputRows), + result -> assertThat(result.getOnlyValue()).isEqualTo(expectedResult)); assertUpdate("DROP TABLE " + tableName); } - @DataProvider - public final Object[][] bucketFilteringDataTypesSetupProvider() - { - List testSetups = ImmutableList.of( - new BucketedFilterTestSetup( - "BOOLEAN", - Stream.concat(IntStream.range(0, 100).mapToObj(i -> "true"), IntStream.range(0, 100).mapToObj(i -> "false")) - .collect(toImmutableList()), - "true", - 100, - 100), - new BucketedFilterTestSetup( - "TINYINT", - IntStream.range(0, 127).mapToObj(String::valueOf).collect(toImmutableList()), - "126", - 26, - 1), - new BucketedFilterTestSetup( - "SMALLINT", - IntStream.range(0, 1000).map(i -> i + 22767).mapToObj(String::valueOf).collect(toImmutableList()), - "22767", - 200, - 1), - new BucketedFilterTestSetup( - "INTEGER", - IntStream.range(0, 1000).map(i -> i + 1274942432).mapToObj(String::valueOf).collect(toImmutableList()), - "1274942432", - 200, - 1), - new BucketedFilterTestSetup( - "BIGINT", - IntStream.range(0, 1000).mapToLong(i -> i + 312739231274942432L).mapToObj(String::valueOf).collect(toImmutableList()), - "312739231274942432", - 200, - 1), - new BucketedFilterTestSetup( - "REAL", - IntStream.range(0, 1000).mapToDouble(i -> i + 567.123).mapToObj(val -> "REAL '" + val + "'").collect(toImmutableList()), - "567.123", - 201, - 1), - new BucketedFilterTestSetup( - "DOUBLE", - IntStream.range(0, 1000).mapToDouble(i -> i + 
1234567890123.123).mapToObj(val -> "DOUBLE '" + val + "'").collect(toImmutableList()), - "1234567890123.123", - 201, - 1), - new BucketedFilterTestSetup( - "VARCHAR", - IntStream.range(0, 1000).mapToObj(i -> "'test value " + i + "'").collect(toImmutableList()), - "'test value 5'", - 200, - 1), - new BucketedFilterTestSetup( - "VARCHAR(20)", - IntStream.range(0, 1000).mapToObj(i -> "'test value " + i + "'").collect(toImmutableList()), - "'test value 5'", - 200, - 1), - new BucketedFilterTestSetup( - "DATE", - IntStream.range(0, 1000).mapToObj(i -> "DATE '2020-02-12' + interval '" + i + "' day").collect(toImmutableList()), - "DATE '2020-02-15'", - 200, - 1), - new BucketedFilterTestSetup( - "ARRAY", - IntStream.range(0, 1000) - .mapToObj(i -> format("ARRAY[%s, %s, %s, %s]", i + 22767, i + 22768, i + 22769, i + 22770)) - .collect(toImmutableList()), - "ARRAY[22767, 22768, 22769, 22770]", - 200, - 1), - new BucketedFilterTestSetup( - "MAP", - IntStream.range(0, 1000) - .mapToObj(i -> format("MAP(ARRAY[%s, %s], ARRAY[%s, %s])", i + 567.123, i + 568.456, i + 22769, i + 22770)) - .collect(toImmutableList()), - "MAP(ARRAY[567.123, 568.456], ARRAY[22769, 22770])", - 149, - 1)); - return testSetups.stream() - .collect(toDataProvider()); - } - - @Test(dataProvider = "bucketedUnsupportedTypes") - public void testBucketedTableUnsupportedTypes(String typeName) + @Test + public void testBucketedTableUnsupportedTypes() + { + testBucketedTableUnsupportedTypes("VARBINARY"); + testBucketedTableUnsupportedTypes("TIMESTAMP"); + testBucketedTableUnsupportedTypes("DECIMAL(10,3)"); + testBucketedTableUnsupportedTypes("CHAR"); + testBucketedTableUnsupportedTypes("ROW(id VARCHAR)"); + } + + private void testBucketedTableUnsupportedTypes(String typeName) { String tableName = "test_bucketed_table_for_unsupported_types_" + randomNameSuffix(); assertThatThrownBy(() -> assertUpdate( @@ -2390,12 +2428,6 @@ public void testBucketedTableUnsupportedTypes(String typeName) .hasMessage("Cannot create a 
table bucketed on an unsupported type"); } - @DataProvider - public final Object[][] bucketedUnsupportedTypes() - { - return new Object[][] {{"VARBINARY"}, {"TIMESTAMP"}, {"DECIMAL(10,3)"}, {"CHAR"}, {"ROW(id VARCHAR)"}}; - } - /** * Regression test for https://github.com/trinodb/trino/issues/5295 */ @@ -3426,7 +3458,7 @@ public void testNullPartitionValues() @Override public void testInsertHighestUnicodeCharacter() { - throw new SkipException("Covered by testInsertUnicode"); + abort("Covered by testInsertUnicode"); } @Test @@ -3880,8 +3912,15 @@ public void testArrays() assertQuery("SELECT col[1][2] FROM tmp_array13", "SELECT 2.345"); } - @Test(dataProvider = "timestampPrecision") - public void testTemporalArrays(HiveTimestampPrecision timestampPrecision) + @Test + public void testTemporalArrays() + { + testTemporalArrays(HiveTimestampPrecision.MILLISECONDS); + testTemporalArrays(HiveTimestampPrecision.MICROSECONDS); + testTemporalArrays(HiveTimestampPrecision.NANOSECONDS); + } + + private void testTemporalArrays(HiveTimestampPrecision timestampPrecision) { Session session = withTimestampPrecision(getSession(), timestampPrecision); assertUpdate("DROP TABLE IF EXISTS tmp_array11"); @@ -3892,7 +3931,14 @@ public void testTemporalArrays(HiveTimestampPrecision timestampPrecision) assertOneNotNullResult(session, "SELECT col[1] FROM tmp_array12"); } - @Test(dataProvider = "timestampPrecision") + @Test + public void testMaps() + { + testMaps(HiveTimestampPrecision.MILLISECONDS); + testMaps(HiveTimestampPrecision.MICROSECONDS); + testMaps(HiveTimestampPrecision.NANOSECONDS); + } + public void testMaps(HiveTimestampPrecision timestampPrecision) { Session session = withTimestampPrecision(getSession(), timestampPrecision); @@ -4180,16 +4226,12 @@ public void testMultipleWritersWhenTaskScaleWritersIsEnabledWithMemoryLimit() .isBetween(0L, workers); } - @DataProvider(name = "taskWritersLimitParams") - public Object[][] prepareScaledWritersOption() - { - return new Object[][] 
{{true, true, 2}, {false, true, 2}, {false, false, 3}}; - } - - @Test(dataProvider = "taskWritersLimitParams") - public void testWriterTasksCountLimitUnpartitioned(boolean scaleWriters, boolean redistributeWrites, int expectedFilesCount) + @Test + public void testWriterTasksCountLimitUnpartitioned() { - testLimitWriterTasks(2, expectedFilesCount, scaleWriters, redistributeWrites, false, DataSize.of(1, MEGABYTE)); + testLimitWriterTasks(2, 2, true, true, false, DataSize.of(1, MEGABYTE)); + testLimitWriterTasks(2, 2, false, true, false, DataSize.of(1, MEGABYTE)); + testLimitWriterTasks(2, 3, false, false, false, DataSize.of(1, MEGABYTE)); } @Test @@ -4857,8 +4899,15 @@ public void testFileSizeHiddenColumn() assertUpdate("DROP TABLE test_file_size"); } - @Test(dataProvider = "timestampPrecision") - public void testFileModifiedTimeHiddenColumn(HiveTimestampPrecision precision) + @Test + public void testFileModifiedTimeHiddenColumn() + { + testFileModifiedTimeHiddenColumn(HiveTimestampPrecision.MILLISECONDS); + testFileModifiedTimeHiddenColumn(HiveTimestampPrecision.MICROSECONDS); + testFileModifiedTimeHiddenColumn(HiveTimestampPrecision.NANOSECONDS); + } + + private void testFileModifiedTimeHiddenColumn(HiveTimestampPrecision precision) { long testStartTime = Instant.now().toEpochMilli(); @@ -5121,6 +5170,7 @@ public void testDropColumn() assertUpdate("DROP TABLE test_drop_column"); } + @Test @Override public void testDropAndAddColumnWithSameName() { @@ -5265,24 +5315,22 @@ public void testMismatchedBucketWithBucketPredicate() assertUpdate("DROP TABLE IF EXISTS test_mismatch_bucketing_with_bucket_predicate32"); } - @DataProvider - public Object[][] timestampPrecisionAndValues() + @Test + public void testParquetTimestampPredicatePushdown() { - return new Object[][] { - {HiveTimestampPrecision.MILLISECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123")}, - {HiveTimestampPrecision.MICROSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123456")}, - 
{HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123000000")}, - {HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123000001")}, - {HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123456789")}, - {HiveTimestampPrecision.MILLISECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123")}, - {HiveTimestampPrecision.MICROSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123456")}, - {HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123000000")}, - {HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123000001")}, - {HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123456789")}}; + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.MILLISECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.MICROSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123456")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123000000")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123000001")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123456789")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.MILLISECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.MICROSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123456")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123000000")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123000001")); + testParquetTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, 
LocalDateTime.parse("1965-10-31T01:00:08.123456789")); } - @Test(dataProvider = "timestampPrecisionAndValues") - public void testParquetTimestampPredicatePushdown(HiveTimestampPrecision timestampPrecision, LocalDateTime value) + private void testParquetTimestampPredicatePushdown(HiveTimestampPrecision timestampPrecision, LocalDateTime value) { Session session = withTimestampPrecision(getSession(), timestampPrecision); String tableName = "test_parquet_timestamp_predicate_pushdown_" + randomNameSuffix(); @@ -5311,8 +5359,22 @@ public void testParquetTimestampPredicatePushdown(HiveTimestampPrecision timesta results -> {}); } - @Test(dataProvider = "timestampPrecisionAndValues") - public void testOrcTimestampPredicatePushdown(HiveTimestampPrecision timestampPrecision, LocalDateTime value) + @Test + public void testOrcTimestampPredicatePushdown() + { + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.MILLISECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.MICROSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123456")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123000000")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123000001")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("2012-10-31T01:00:08.123456789")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.MILLISECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.MICROSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123456")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123000000")); + testOrcTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123000001")); + 
testOrcTimestampPredicatePushdown(HiveTimestampPrecision.NANOSECONDS, LocalDateTime.parse("1965-10-31T01:00:08.123456789")); + } + + private void testOrcTimestampPredicatePushdown(HiveTimestampPrecision timestampPrecision, LocalDateTime value) { Session session = withTimestampPrecision(getSession(), timestampPrecision); assertUpdate("DROP TABLE IF EXISTS test_orc_timestamp_predicate_pushdown"); @@ -7995,24 +8057,26 @@ public void testUseSortedProperties() assertUpdate("DROP TABLE " + tableName); } - @Test(dataProvider = "testCreateTableWithCompressionCodecDataProvider") - public void testCreateTableWithCompressionCodec(HiveCompressionCodec compressionCodec) + @Test + public void testCreateTableWithCompressionCodec() { - testWithAllStorageFormats((session, hiveStorageFormat) -> { - if (hiveStorageFormat == HiveStorageFormat.PARQUET && compressionCodec == HiveCompressionCodec.LZ4) { - // TODO (https://github.com/trinodb/trino/issues/9142) Support LZ4 compression with native Parquet writer - assertThatThrownBy(() -> testCreateTableWithCompressionCodec(session, hiveStorageFormat, compressionCodec)) - .hasMessage("Unsupported codec: LZ4"); - return; - } + for (HiveCompressionCodec compressionCodec : HiveCompressionCodec.values()) { + testWithAllStorageFormats((session, hiveStorageFormat) -> { + if (hiveStorageFormat == HiveStorageFormat.PARQUET && compressionCodec == HiveCompressionCodec.LZ4) { + // TODO (https://github.com/trinodb/trino/issues/9142) Support LZ4 compression with native Parquet writer + assertThatThrownBy(() -> testCreateTableWithCompressionCodec(session, hiveStorageFormat, compressionCodec)) + .hasMessage("Unsupported codec: LZ4"); + return; + } - if (!isSupportedCodec(hiveStorageFormat, compressionCodec)) { - assertThatThrownBy(() -> testCreateTableWithCompressionCodec(session, hiveStorageFormat, compressionCodec)) - .hasMessage("Compression codec " + compressionCodec + " not supported for " + hiveStorageFormat); - return; - } - 
testCreateTableWithCompressionCodec(session, hiveStorageFormat, compressionCodec); - }); + if (!isSupportedCodec(hiveStorageFormat, compressionCodec)) { + assertThatThrownBy(() -> testCreateTableWithCompressionCodec(session, hiveStorageFormat, compressionCodec)) + .hasMessage("Compression codec " + compressionCodec + " not supported for " + hiveStorageFormat); + return; + } + testCreateTableWithCompressionCodec(session, hiveStorageFormat, compressionCodec); + }); + } } private boolean isSupportedCodec(HiveStorageFormat storageFormat, HiveCompressionCodec codec) @@ -8023,13 +8087,6 @@ private boolean isSupportedCodec(HiveStorageFormat storageFormat, HiveCompressio return true; } - @DataProvider - public Object[][] testCreateTableWithCompressionCodecDataProvider() - { - return Stream.of(HiveCompressionCodec.values()) - .collect(toDataProvider()); - } - private void testCreateTableWithCompressionCodec(Session session, HiveStorageFormat storageFormat, HiveCompressionCodec compressionCodec) { session = Session.builder(session) @@ -8627,8 +8684,22 @@ public void testTimestampWithTimeZone() assertUpdate("DROP TABLE test_timestamptz"); } - @Test(dataProvider = "legalUseColumnNamesProvider") - public void testUseColumnNames(HiveStorageFormat format, boolean formatUseColumnNames) + @Test + public void testUseColumnNames() + { + testUseColumnNames(HiveStorageFormat.ORC, true); + testUseColumnNames(HiveStorageFormat.ORC, false); + testUseColumnNames(HiveStorageFormat.PARQUET, true); + testUseColumnNames(HiveStorageFormat.PARQUET, false); + testUseColumnNames(HiveStorageFormat.AVRO, false); + testUseColumnNames(HiveStorageFormat.JSON, false); + testUseColumnNames(HiveStorageFormat.RCBINARY, false); + testUseColumnNames(HiveStorageFormat.RCTEXT, false); + testUseColumnNames(HiveStorageFormat.SEQUENCEFILE, false); + testUseColumnNames(HiveStorageFormat.TEXTFILE, false); + } + + private void testUseColumnNames(HiveStorageFormat format, boolean formatUseColumnNames) { String 
lowerCaseFormat = format.name().toLowerCase(Locale.ROOT); Session.SessionBuilder builder = Session.builder(getSession()); @@ -8658,8 +8729,17 @@ public void testUseColumnNames(HiveStorageFormat format, boolean formatUseColumn assertUpdate("DROP TABLE " + tableName); } - @Test(dataProvider = "hiddenColumnNames") - public void testHiddenColumnNameConflict(String columnName) + @Test + public void testHiddenColumnNameConflict() + { + testHiddenColumnNameConflict("$path"); + testHiddenColumnNameConflict("$bucket"); + testHiddenColumnNameConflict("$file_size"); + testHiddenColumnNameConflict("$file_modified_time"); + testHiddenColumnNameConflict("$partition"); + } + + private void testHiddenColumnNameConflict(String columnName) { try (TestTable table = new TestTable( getQueryRunner()::execute, @@ -8670,20 +8750,22 @@ public void testHiddenColumnNameConflict(String columnName) } } - @DataProvider - public Object[][] hiddenColumnNames() + @Test + public void testUseColumnAddDrop() { - return new Object[][] { - {"$path"}, - {"$bucket"}, - {"$file_size"}, - {"$file_modified_time"}, - {"$partition"}, - }; + testUseColumnAddDrop(HiveStorageFormat.ORC, true); + testUseColumnAddDrop(HiveStorageFormat.ORC, false); + testUseColumnAddDrop(HiveStorageFormat.PARQUET, true); + testUseColumnAddDrop(HiveStorageFormat.PARQUET, false); + testUseColumnAddDrop(HiveStorageFormat.AVRO, false); + testUseColumnAddDrop(HiveStorageFormat.JSON, false); + testUseColumnAddDrop(HiveStorageFormat.RCBINARY, false); + testUseColumnAddDrop(HiveStorageFormat.RCTEXT, false); + testUseColumnAddDrop(HiveStorageFormat.SEQUENCEFILE, false); + testUseColumnAddDrop(HiveStorageFormat.TEXTFILE, false); } - @Test(dataProvider = "legalUseColumnNamesProvider") - public void testUseColumnAddDrop(HiveStorageFormat format, boolean formatUseColumnNames) + private void testUseColumnAddDrop(HiveStorageFormat format, boolean formatUseColumnNames) { String lowerCaseFormat = format.name().toLowerCase(Locale.ROOT); 
Session.SessionBuilder builder = Session.builder(getSession()); @@ -8927,23 +9009,6 @@ public void testSelectWithShortZoneId() private static final Set NAMED_COLUMN_ONLY_FORMATS = ImmutableSet.of(HiveStorageFormat.AVRO, HiveStorageFormat.JSON); - @DataProvider - public Object[][] legalUseColumnNamesProvider() - { - return new Object[][] { - {HiveStorageFormat.ORC, true}, - {HiveStorageFormat.ORC, false}, - {HiveStorageFormat.PARQUET, true}, - {HiveStorageFormat.PARQUET, false}, - {HiveStorageFormat.AVRO, false}, - {HiveStorageFormat.JSON, false}, - {HiveStorageFormat.RCBINARY, false}, - {HiveStorageFormat.RCTEXT, false}, - {HiveStorageFormat.SEQUENCEFILE, false}, - {HiveStorageFormat.TEXTFILE, false}, - }; - } - private Session getParallelWriteSession(Session baseSession) { return Session.builder(baseSession) @@ -9092,15 +9157,6 @@ public TypeAndEstimate(Type type, EstimatedStatsAndCost estimate) } } - @DataProvider - public Object[][] timestampPrecision() - { - return new Object[][] { - {HiveTimestampPrecision.MILLISECONDS}, - {HiveTimestampPrecision.MICROSECONDS}, - {HiveTimestampPrecision.NANOSECONDS}}; - } - @Override protected Optional filterDataMappingSmokeTestData(DataMappingTestSetup dataMappingTestSetup) { @@ -9122,7 +9178,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Hive connector does not support column default values"); + return abort("Hive connector does not support column default values"); } @Override diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorTest.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorTest.java index 1d8c94367020..a39fdc84ce78 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorTest.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorTest.java @@ -14,7 +14,7 @@ package io.trino.plugin.hive; import 
io.trino.testing.QueryRunner; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import static io.trino.testing.TestingNames.randomNameSuffix; diff --git a/plugin/trino-hudi/src/test/java/io/trino/plugin/hudi/TestHudiConnectorTest.java b/plugin/trino-hudi/src/test/java/io/trino/plugin/hudi/TestHudiConnectorTest.java index d877e0194e1f..66527f5a2e52 100644 --- a/plugin/trino-hudi/src/test/java/io/trino/plugin/hudi/TestHudiConnectorTest.java +++ b/plugin/trino-hudi/src/test/java/io/trino/plugin/hudi/TestHudiConnectorTest.java @@ -18,7 +18,7 @@ import io.trino.testing.BaseConnectorTest; import io.trino.testing.QueryRunner; import io.trino.testing.TestingConnectorBehavior; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import static io.trino.plugin.hudi.HudiQueryRunner.createHudiQueryRunner; import static io.trino.plugin.hudi.testing.HudiTestUtils.COLUMNS_TO_HIDE; diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java index e139895358fd..92bfa3778f10 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java @@ -43,7 +43,6 @@ import io.trino.sql.planner.plan.OutputNode; import io.trino.sql.planner.plan.ValuesNode; import io.trino.testing.BaseConnectorTest; -import io.trino.testing.DataProviders; import io.trino.testing.DistributedQueryRunner; import io.trino.testing.MaterializedResult; import io.trino.testing.MaterializedResultWithQueryId; @@ -62,10 +61,10 @@ import org.apache.iceberg.io.FileIO; import org.apache.iceberg.util.JsonUtil; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import 
org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.Timeout; import java.io.File; import java.io.IOException; @@ -77,7 +76,6 @@ import java.time.Instant; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -129,7 +127,6 @@ import static io.trino.spi.type.TimeZoneKey.getTimeZoneKey; import static io.trino.spi.type.VarcharType.VARCHAR; import static io.trino.sql.planner.assertions.PlanMatchPattern.node; -import static io.trino.testing.DataProviders.toDataProvider; import static io.trino.testing.MaterializedResult.resultBuilder; import static io.trino.testing.QueryAssertions.assertEqualsIgnoreOrder; import static io.trino.testing.TestingConnectorSession.SESSION; @@ -153,12 +150,15 @@ import static java.util.stream.IntStream.range; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; +@TestInstance(PER_CLASS) public abstract class BaseIcebergConnectorTest extends BaseConnectorTest { @@ -193,13 +193,13 @@ protected IcebergQueryRunner.Builder createQueryRunnerBuilder() .setInitialTables(REQUIRED_TPCH_TABLES); } - @BeforeClass + @BeforeAll public void initFileSystem() { fileSystem = getFileSystemFactory(getDistributedQueryRunner()).create(SESSION); } - @BeforeClass + @BeforeAll public void initStorageTimePrecision() { try (TestTable table = new TestTable(getQueryRunner()::execute, "inspect_storage_precision", "(i int)")) { @@ -244,15 
+244,18 @@ public void testAddRowFieldCaseInsensitivity() } } + @Test @Override - public void testAddAndDropColumnName(String columnName) + public void testAddAndDropColumnName() { - if (columnName.equals("a.dot")) { - assertThatThrownBy(() -> super.testAddAndDropColumnName(columnName)) - .hasMessage("Failed to add column: Cannot add column with ambiguous name: a.dot, use addColumn(parent, name, type)"); - return; + for (String columnName : testColumnNameDataProvider()) { + if (columnName.equals("a.dot")) { + assertThatThrownBy(() -> testAddAndDropColumnName(columnName, requiresDelimiting(columnName))) + .hasMessage("Failed to add column: Cannot add column with ambiguous name: a.dot, use addColumn(parent, name, type)"); + return; + } + testAddAndDropColumnName(columnName, requiresDelimiting(columnName)); } - super.testAddAndDropColumnName(columnName); } @Override @@ -290,6 +293,7 @@ public void testDeleteOnV1Table() } } + @Test @Override public void testCharVarcharComparison() { @@ -1085,31 +1089,28 @@ public void testCreatePartitionedTableAs() dropTable("test_create_partitioned_table_as"); } - @DataProvider(name = "partitionedTableWithQuotedIdentifierCasing") - public static Object[][] partitionedTableWithQuotedIdentifierCasing() - { - return new Object[][] { - {"x", "x", true}, - {"X", "x", true}, - {"\"x\"", "x", true}, - {"\"X\"", "x", true}, - {"x", "\"x\"", true}, - {"X", "\"x\"", true}, - {"\"x\"", "\"x\"", true}, - {"\"X\"", "\"x\"", true}, - {"x", "X", true}, - {"X", "X", true}, - {"\"x\"", "X", true}, - {"\"X\"", "X", true}, - {"x", "\"X\"", false}, - {"X", "\"X\"", false}, - {"\"x\"", "\"X\"", false}, - {"\"X\"", "\"X\"", false}, - }; + @Test + public void testCreatePartitionedTableWithQuotedIdentifierCasing() + { + testCreatePartitionedTableWithQuotedIdentifierCasing("x", "x", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("X", "x", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"x\"", "x", true); + 
testCreatePartitionedTableWithQuotedIdentifierCasing("\"X\"", "x", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("x", "\"x\"", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("X", "\"x\"", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"x\"", "\"x\"", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"X\"", "\"x\"", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("x", "X", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("X", "X", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"x\"", "X", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"X\"", "X", true); + testCreatePartitionedTableWithQuotedIdentifierCasing("x", "\"X\"", false); + testCreatePartitionedTableWithQuotedIdentifierCasing("X", "\"X\"", false); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"x\"", "\"X\"", false); + testCreatePartitionedTableWithQuotedIdentifierCasing("\"X\"", "\"X\"", false); } - @Test(dataProvider = "partitionedTableWithQuotedIdentifierCasing") - public void testCreatePartitionedTableWithQuotedIdentifierCasing(String columnName, String partitioningField, boolean success) + private void testCreatePartitionedTableWithQuotedIdentifierCasing(String columnName, String partitioningField, boolean success) { String tableName = "partitioning_" + randomNameSuffix(); @Language("SQL") String sql = format("CREATE TABLE %s (%s bigint) WITH (partitioning = ARRAY['%s'])", tableName, columnName, partitioningField); @@ -1244,51 +1245,45 @@ public void testEmptySortedByList() dropTable(tableName); } - @Test(dataProvider = "sortedTableWithQuotedIdentifierCasing") - public void testCreateSortedTableWithQuotedIdentifierCasing(String columnName, String sortField) + @Test + public void testCreateSortedTableWithQuotedIdentifierCasing() + { + testCreateSortedTableWithQuotedIdentifierCasing("col", "col"); + testCreateSortedTableWithQuotedIdentifierCasing("COL", "col"); + 
testCreateSortedTableWithQuotedIdentifierCasing("\"col\"", "col"); + testCreateSortedTableWithQuotedIdentifierCasing("\"COL\"", "col"); + testCreateSortedTableWithQuotedIdentifierCasing("col", "\"col\""); + testCreateSortedTableWithQuotedIdentifierCasing("COL", "\"col\""); + testCreateSortedTableWithQuotedIdentifierCasing("\"col\"", "\"col\""); + testCreateSortedTableWithQuotedIdentifierCasing("\"COL\"", "\"col\""); + } + + private void testCreateSortedTableWithQuotedIdentifierCasing(String columnName, String sortField) { String tableName = "test_create_sorted_table_with_quotes_" + randomNameSuffix(); assertUpdate(format("CREATE TABLE %s (%s bigint) WITH (sorted_by = ARRAY['%s'])", tableName, columnName, sortField)); dropTable(tableName); } - @DataProvider(name = "sortedTableWithQuotedIdentifierCasing") - public static Object[][] sortedTableWithQuotedIdentifierCasing() + @Test + public void testCreateSortedTableWithSortTransform() { - return new Object[][] { - {"col", "col"}, - {"COL", "col"}, - {"\"col\"", "col"}, - {"\"COL\"", "col"}, - {"col", "\"col\""}, - {"COL", "\"col\""}, - {"\"col\"", "\"col\""}, - {"\"COL\"", "\"col\""}, - }; + testCreateSortedTableWithSortTransform("col", "bucket(col, 3)"); + testCreateSortedTableWithSortTransform("col", "bucket(\"col\", 3)"); + testCreateSortedTableWithSortTransform("col", "truncate(col, 3)"); + testCreateSortedTableWithSortTransform("col", "year(col)"); + testCreateSortedTableWithSortTransform("col", "month(col)"); + testCreateSortedTableWithSortTransform("col", "date(col)"); + testCreateSortedTableWithSortTransform("col", "hour(col)"); } - @Test(dataProvider = "sortedTableWithSortTransform") - public void testCreateSortedTableWithSortTransform(String columnName, String sortField) + private void testCreateSortedTableWithSortTransform(String columnName, String sortField) { String tableName = "test_sort_with_transform_" + randomNameSuffix(); assertThatThrownBy(() -> query(format("CREATE TABLE %s (%s TIMESTAMP(6)) WITH 
(sorted_by = ARRAY['%s'])", tableName, columnName, sortField))) .hasMessageContaining("Unable to parse sort field"); } - @DataProvider(name = "sortedTableWithSortTransform") - public static Object[][] sortedTableWithSortTransform() - { - return new Object[][] { - {"col", "bucket(col, 3)"}, - {"col", "bucket(\"col\", 3)"}, - {"col", "truncate(col, 3)"}, - {"col", "year(col)"}, - {"col", "month(col)"}, - {"col", "date(col)"}, - {"col", "hour(col)"}, - }; - } - @Test public void testSortOrderChange() { @@ -1520,6 +1515,7 @@ public void testSchemaEvolution() dropTable("test_schema_evolution_drop_middle"); } + @Test @Override public void testDropRowFieldWhenDuplicates() { @@ -3042,7 +3038,13 @@ public void testTruncateTextTransform() dropTable("test_truncate_text_transform"); } - @Test(dataProvider = "truncateNumberTypesProvider") + @Test + public void testTruncateIntegerTransform() + { + testTruncateIntegerTransform("integer"); + testTruncateIntegerTransform("bigint"); + } + public void testTruncateIntegerTransform(String dataType) { String table = format("test_truncate_%s_transform", dataType); @@ -3136,15 +3138,6 @@ public void testTruncateIntegerTransform(String dataType) dropTable(table); } - @DataProvider - public Object[][] truncateNumberTypesProvider() - { - return new Object[][] { - {"integer"}, - {"bigint"}, - }; - } - @Test public void testTruncateDecimalTransform() { @@ -3830,8 +3823,14 @@ public void testPredicatesWithStructuralTypes() dropTable(tableName); } - @Test(dataProviderClass = DataProviders.class, dataProvider = "trueFalse") - public void testPartitionsTableWithColumnNameConflict(boolean partitioned) + @Test + public void testPartitionsTableWithColumnNameConflict() + { + testPartitionsTableWithColumnNameConflict(true); + testPartitionsTableWithColumnNameConflict(false); + } + + private void testPartitionsTableWithColumnNameConflict(boolean partitioned) { assertUpdate("DROP TABLE IF EXISTS test_partitions_with_conflict"); assertUpdate("CREATE TABLE 
test_partitions_with_conflict (" + @@ -4546,35 +4545,34 @@ public void testAllAvailableTypes() assertUpdate("DROP TABLE test_all_types"); } - @Test(dataProvider = "repartitioningDataProvider") - public void testRepartitionDataOnCtas(Session session, String partitioning, int expectedFiles) - { - testRepartitionData(session, "tpch.tiny.orders", true, partitioning, expectedFiles); - } - - @Test(dataProvider = "repartitioningDataProvider") - public void testRepartitionDataOnInsert(Session session, String partitioning, int expectedFiles) + @Test + public void testRepartitionDataOnCtas() { - testRepartitionData(session, "tpch.tiny.orders", false, partitioning, expectedFiles); + // identity partitioning column + testRepartitionData(getSession(), "tpch.tiny.orders", true, "'orderstatus'", 3); + // bucketing + testRepartitionData(getSession(), "tpch.tiny.orders", true, "'bucket(custkey, 13)'", 13); + // varchar-based + testRepartitionData(getSession(), "tpch.tiny.orders", true, "'truncate(comment, 1)'", 35); + // complex; would exceed 100 open writers limit in IcebergPageSink without write repartitioning + testRepartitionData(getSession(), "tpch.tiny.orders", true, "'bucket(custkey, 4)', 'truncate(comment, 1)'", 131); + // same column multiple times + testRepartitionData(getSession(), "tpch.tiny.orders", true, "'truncate(comment, 1)', 'orderstatus', 'bucket(comment, 2)'", 180); } - @DataProvider - public Object[][] repartitioningDataProvider() + @Test + public void testRepartitionDataOnInsert() { - Session defaultSession = getSession(); - - return new Object[][] { - // identity partitioning column - {defaultSession, "'orderstatus'", 3}, - // bucketing - {defaultSession, "'bucket(custkey, 13)'", 13}, - // varchar-based - {defaultSession, "'truncate(comment, 1)'", 35}, - // complex; would exceed 100 open writers limit in IcebergPageSink without write repartitioning - {defaultSession, "'bucket(custkey, 4)', 'truncate(comment, 1)'", 131}, - // same column multiple times - 
{defaultSession, "'truncate(comment, 1)', 'orderstatus', 'bucket(comment, 2)'", 180}, - }; + // identity partitioning column + testRepartitionData(getSession(), "tpch.tiny.orders", false, "'orderstatus'", 3); + // bucketing + testRepartitionData(getSession(), "tpch.tiny.orders", false, "'bucket(custkey, 13)'", 13); + // varchar-based + testRepartitionData(getSession(), "tpch.tiny.orders", false, "'truncate(comment, 1)'", 35); + // complex; would exceed 100 open writers limit in IcebergPageSink without write repartitioning + testRepartitionData(getSession(), "tpch.tiny.orders", false, "'bucket(custkey, 4)', 'truncate(comment, 1)'", 131); + // same column multiple times + testRepartitionData(getSession(), "tpch.tiny.orders", false, "'truncate(comment, 1)', 'orderstatus', 'bucket(comment, 2)'", 180); } @Test @@ -4664,32 +4662,34 @@ private void testRepartitionData(Session session, String sourceRelation, boolean assertUpdate(session, "DROP TABLE " + tableName); } - @Test(dataProvider = "testDataMappingSmokeTestDataProvider") - public void testSplitPruningForFilterOnNonPartitionColumn(DataMappingTestSetup testSetup) + @Test + public void testSplitPruningForFilterOnNonPartitionColumn() { - if (testSetup.isUnsupportedType()) { - return; - } - try (TestTable table = new TestTable(getQueryRunner()::execute, "test_split_pruning_non_partitioned", "(row_id int, col " + testSetup.getTrinoTypeName() + ")")) { - String tableName = table.getName(); - String sampleValue = testSetup.getSampleValueLiteral(); - String highValue = testSetup.getHighValueLiteral(); - // Insert separately to ensure two files with one value each - assertUpdate("INSERT INTO " + tableName + " VALUES (1, " + sampleValue + ")", 1); - assertUpdate("INSERT INTO " + tableName + " VALUES (2, " + highValue + ")", 1); - assertQuery("select count(*) from \"" + tableName + "$files\"", "VALUES 2"); - - int expectedSplitCount = supportsIcebergFileStatistics(testSetup.getTrinoTypeName()) ? 
1 : 2; - verifySplitCount("SELECT row_id FROM " + tableName, 2); - verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + sampleValue, expectedSplitCount); - verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + highValue, expectedSplitCount); - - // ORC max timestamp statistics are truncated to millisecond precision and then appended with 999 microseconds. - // Therefore, sampleValue and highValue are within the max timestamp & there will be 2 splits. - verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col > " + sampleValue, - (format == ORC && testSetup.getTrinoTypeName().contains("timestamp") ? 2 : expectedSplitCount)); - verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col < " + highValue, - (format == ORC && testSetup.getTrinoTypeName().contains("timestamp(6)") ? 2 : expectedSplitCount)); + for (DataMappingTestSetup testSetup : testDataMappingSmokeTestDataProvider()) { + if (testSetup.isUnsupportedType()) { + return; + } + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_split_pruning_non_partitioned", "(row_id int, col " + testSetup.getTrinoTypeName() + ")")) { + String tableName = table.getName(); + String sampleValue = testSetup.getSampleValueLiteral(); + String highValue = testSetup.getHighValueLiteral(); + // Insert separately to ensure two files with one value each + assertUpdate("INSERT INTO " + tableName + " VALUES (1, " + sampleValue + ")", 1); + assertUpdate("INSERT INTO " + tableName + " VALUES (2, " + highValue + ")", 1); + assertQuery("select count(*) from \"" + tableName + "$files\"", "VALUES 2"); + + int expectedSplitCount = supportsIcebergFileStatistics(testSetup.getTrinoTypeName()) ? 
1 : 2; + verifySplitCount("SELECT row_id FROM " + tableName, 2); + verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + sampleValue, expectedSplitCount); + verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col = " + highValue, expectedSplitCount); + + // ORC max timestamp statistics are truncated to millisecond precision and then appended with 999 microseconds. + // Therefore, sampleValue and highValue are within the max timestamp & there will be 2 splits. + verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col > " + sampleValue, + (format == ORC && testSetup.getTrinoTypeName().contains("timestamp") ? 2 : expectedSplitCount)); + verifySplitCount("SELECT row_id FROM " + tableName + " WHERE col < " + highValue, + (format == ORC && testSetup.getTrinoTypeName().contains("timestamp(6)") ? 2 : expectedSplitCount)); + } } } @@ -4712,28 +4712,30 @@ protected void verifyIcebergTableProperties(MaterializedResult actual) protected abstract boolean supportsIcebergFileStatistics(String typeName); - @Test(dataProvider = "testDataMappingSmokeTestDataProvider") - public void testSplitPruningFromDataFileStatistics(DataMappingTestSetup testSetup) + @Test + public void testSplitPruningFromDataFileStatistics() { - if (testSetup.isUnsupportedType()) { - return; - } - try (TestTable table = new TestTable( - getQueryRunner()::execute, - "test_split_pruning_data_file_statistics", - // Random double is needed to make sure rows are different. 
Otherwise compression may deduplicate rows, resulting in only one row group - "(col " + testSetup.getTrinoTypeName() + ", r double)")) { - String tableName = table.getName(); - String values = - Stream.concat( - nCopies(100, testSetup.getSampleValueLiteral()).stream(), - nCopies(100, testSetup.getHighValueLiteral()).stream()) - .map(value -> "(" + value + ", rand())") - .collect(joining(", ")); - assertUpdate(withSmallRowGroups(getSession()), "INSERT INTO " + tableName + " VALUES " + values, 200); - - String query = "SELECT * FROM " + tableName + " WHERE col = " + testSetup.getSampleValueLiteral(); - verifyPredicatePushdownDataRead(query, supportsRowGroupStatistics(testSetup.getTrinoTypeName())); + for (DataMappingTestSetup testSetup : testDataMappingSmokeTestDataProvider()) { + if (testSetup.isUnsupportedType()) { + return; + } + try (TestTable table = new TestTable( + getQueryRunner()::execute, + "test_split_pruning_data_file_statistics", + // Random double is needed to make sure rows are different. 
Otherwise compression may deduplicate rows, resulting in only one row group + "(col " + testSetup.getTrinoTypeName() + ", r double)")) { + String tableName = table.getName(); + String values = + Stream.concat( + nCopies(100, testSetup.getSampleValueLiteral()).stream(), + nCopies(100, testSetup.getHighValueLiteral()).stream()) + .map(value -> "(" + value + ", rand())") + .collect(joining(", ")); + assertUpdate(withSmallRowGroups(getSession()), "INSERT INTO " + tableName + " VALUES " + values, 200); + + String query = "SELECT * FROM " + tableName + " WHERE col = " + testSetup.getSampleValueLiteral(); + verifyPredicatePushdownDataRead(query, supportsRowGroupStatistics(testSetup.getTrinoTypeName())); + } } } @@ -4807,7 +4809,7 @@ private OperatorStats getOperatorStats(QueryId queryId) @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Iceberg connector does not support column default values"); + return abort("Iceberg connector does not support column default values"); } @Override @@ -4919,115 +4921,123 @@ public void testProjectionPushdownOnPartitionedTableWithComments() assertUpdate("DROP TABLE IF EXISTS test_projection_pushdown_comments"); } - @Test(dataProvider = "tableFormatVersion") - public void testOptimize(int formatVersion) + @Test + public void testOptimize() throws Exception { - String tableName = "test_optimize_" + randomNameSuffix(); - assertUpdate("CREATE TABLE " + tableName + " (key integer, value varchar) WITH (format_version = " + formatVersion + ")"); + for (int formatVersion = IcebergConfig.FORMAT_VERSION_SUPPORT_MIN; formatVersion < IcebergConfig.FORMAT_VERSION_SUPPORT_MAX; formatVersion++) { + String tableName = "test_optimize_" + randomNameSuffix(); + assertUpdate("CREATE TABLE " + tableName + " (key integer, value varchar) WITH (format_version = " + formatVersion + ")"); - // DistributedQueryRunner sets node-scheduler.include-coordinator by default, so include coordinator - int workerCount = 
getQueryRunner().getNodeCount(); + // DistributedQueryRunner sets node-scheduler.include-coordinator by default, so include coordinator + int workerCount = getQueryRunner().getNodeCount(); - // optimize an empty table - assertQuerySucceeds(withSingleWriterPerTask(getSession()), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); - assertThat(getActiveFiles(tableName)).isEmpty(); + // optimize an empty table + assertQuerySucceeds(withSingleWriterPerTask(getSession()), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); + assertThat(getActiveFiles(tableName)).isEmpty(); - assertUpdate("INSERT INTO " + tableName + " VALUES (11, 'eleven')", 1); - assertUpdate("INSERT INTO " + tableName + " VALUES (12, 'zwölf')", 1); - assertUpdate("INSERT INTO " + tableName + " VALUES (13, 'trzynaście')", 1); - assertUpdate("INSERT INTO " + tableName + " VALUES (14, 'quatorze')", 1); - assertUpdate("INSERT INTO " + tableName + " VALUES (15, 'пʼятнадцять')", 1); + assertUpdate("INSERT INTO " + tableName + " VALUES (11, 'eleven')", 1); + assertUpdate("INSERT INTO " + tableName + " VALUES (12, 'zwölf')", 1); + assertUpdate("INSERT INTO " + tableName + " VALUES (13, 'trzynaście')", 1); + assertUpdate("INSERT INTO " + tableName + " VALUES (14, 'quatorze')", 1); + assertUpdate("INSERT INTO " + tableName + " VALUES (15, 'пʼятнадцять')", 1); - List initialFiles = getActiveFiles(tableName); - assertThat(initialFiles) - .hasSize(5) - // Verify we have sufficiently many test rows with respect to worker count. - .hasSizeGreaterThan(workerCount); + List initialFiles = getActiveFiles(tableName); + assertThat(initialFiles) + .hasSize(5) + // Verify we have sufficiently many test rows with respect to worker count. + .hasSizeGreaterThan(workerCount); - // For optimize we need to set task_min_writer_count to 1, otherwise it will create more than one file. 
- computeActual(withSingleWriterPerTask(getSession()), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); - assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName)) - .matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')"); - List updatedFiles = getActiveFiles(tableName); - assertThat(updatedFiles) - .hasSizeBetween(1, workerCount) - .doesNotContainAnyElementsOf(initialFiles); - // No files should be removed (this is expire_snapshots's job, when it exists) - assertThat(getAllDataFilesFromTableDirectory(tableName)) - .containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles)); + // For optimize we need to set task_min_writer_count to 1, otherwise it will create more than one file. + computeActual(withSingleWriterPerTask(getSession()), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); + assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName)) + .matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')"); + List updatedFiles = getActiveFiles(tableName); + assertThat(updatedFiles) + .hasSizeBetween(1, workerCount) + .doesNotContainAnyElementsOf(initialFiles); + // No files should be removed (this is expire_snapshots's job, when it exists) + assertThat(getAllDataFilesFromTableDirectory(tableName)) + .containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles)); + + // optimize with low retention threshold, nothing should change + // For optimize we need to set task_min_writer_count to 1, otherwise it will create more than one file. 
+ computeActual(withSingleWriterPerTask(getSession()), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE (file_size_threshold => '33B')"); + assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName)) + .matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')"); + assertThat(getActiveFiles(tableName)).isEqualTo(updatedFiles); + assertThat(getAllDataFilesFromTableDirectory(tableName)) + .containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles)); - // optimize with low retention threshold, nothing should change - // For optimize we need to set task_min_writer_count to 1, otherwise it will create more than one file. - computeActual(withSingleWriterPerTask(getSession()), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE (file_size_threshold => '33B')"); - assertThat(query("SELECT sum(key), listagg(value, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName)) - .matches("VALUES (BIGINT '65', VARCHAR 'eleven zwölf trzynaście quatorze пʼятнадцять')"); - assertThat(getActiveFiles(tableName)).isEqualTo(updatedFiles); - assertThat(getAllDataFilesFromTableDirectory(tableName)) - .containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles)); - - // optimize with delimited procedure name - assertQueryFails("ALTER TABLE " + tableName + " EXECUTE \"optimize\"", "Table procedure not registered: optimize"); - assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\""); - // optimize with delimited parameter name (and procedure name) - assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"file_size_threshold\" => '33B')"); // TODO (https://github.com/trinodb/trino/issues/11326) this should fail - assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"FILE_SIZE_THRESHOLD\" => '33B')"); - assertUpdate("DROP TABLE " + tableName); + // optimize with delimited procedure name + assertQueryFails("ALTER TABLE " + tableName + " EXECUTE \"optimize\"", "Table 
procedure not registered: optimize"); + assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\""); + // optimize with delimited parameter name (and procedure name) + assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"file_size_threshold\" => '33B')"); // TODO (https://github.com/trinodb/trino/issues/11326) this should fail + assertUpdate("ALTER TABLE " + tableName + " EXECUTE \"OPTIMIZE\" (\"FILE_SIZE_THRESHOLD\" => '33B')"); + assertUpdate("DROP TABLE " + tableName); + } } - @Test(dataProvider = "tableFormatVersion") - public void testOptimizeForPartitionedTable(int formatVersion) + @Test + public void testOptimizeForPartitionedTable() throws IOException { - // This test will have its own session to make sure partitioning is indeed forced and is not a result - // of session configuration - Session session = testSessionBuilder() - .setCatalog(getQueryRunner().getDefaultSession().getCatalog()) - .setSchema(getQueryRunner().getDefaultSession().getSchema()) - .setSystemProperty("use_preferred_write_partitioning", "true") - .build(); - String tableName = "test_repartitiong_during_optimize_" + randomNameSuffix(); - assertUpdate(session, "CREATE TABLE " + tableName + " (key varchar, value integer) WITH (format_version = " + formatVersion + ", partitioning = ARRAY['key'])"); - // optimize an empty table - assertQuerySucceeds(withSingleWriterPerTask(session), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); - - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 1)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 2)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 3)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 4)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 5)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 6)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 
7)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 8)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 9)", 1); - assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('three', 10)", 1); - - List initialFiles = getActiveFiles(tableName); - assertThat(initialFiles).hasSize(10); + for (int formatVersion = IcebergConfig.FORMAT_VERSION_SUPPORT_MIN; formatVersion < IcebergConfig.FORMAT_VERSION_SUPPORT_MAX; formatVersion++) { + // This test will have its own session to make sure partitioning is indeed forced and is not a result + // of session configuration + Session session = testSessionBuilder() + .setCatalog(getQueryRunner().getDefaultSession().getCatalog()) + .setSchema(getQueryRunner().getDefaultSession().getSchema()) + .setSystemProperty("use_preferred_write_partitioning", "true") + .build(); + String tableName = "test_repartitiong_during_optimize_" + randomNameSuffix(); + assertUpdate(session, "CREATE TABLE " + tableName + " (key varchar, value integer) WITH (format_version = " + formatVersion + ", partitioning = ARRAY['key'])"); + // optimize an empty table + assertQuerySucceeds(withSingleWriterPerTask(session), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); + + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 1)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 2)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 3)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 4)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 5)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 6)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('one', 7)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 8)", 1); + assertUpdate(session, "INSERT INTO " + tableName + " VALUES ('two', 9)", 1); + assertUpdate(session, "INSERT INTO " + 
tableName + " VALUES ('three', 10)", 1); + + List initialFiles = getActiveFiles(tableName); + assertThat(initialFiles).hasSize(10); - // For optimize we need to set task_min_writer_count to 1, otherwise it will create more than one file. - computeActual(withSingleWriterPerTask(session), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); + // For optimize we need to set task_min_writer_count to 1, otherwise it will create more than one file. + computeActual(withSingleWriterPerTask(session), "ALTER TABLE " + tableName + " EXECUTE OPTIMIZE"); - assertThat(query(session, "SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName)) - .matches("VALUES (BIGINT '55', VARCHAR 'one one one one one one one three two two')"); + assertThat(query(session, "SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName)) + .matches("VALUES (BIGINT '55', VARCHAR 'one one one one one one one three two two')"); - List updatedFiles = getActiveFiles(tableName); - // as we force repartitioning there should be only 3 partitions - assertThat(updatedFiles).hasSize(3); - assertThat(getAllDataFilesFromTableDirectory(tableName)).containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles)); + List updatedFiles = getActiveFiles(tableName); + // as we force repartitioning there should be only 3 partitions + assertThat(updatedFiles).hasSize(3); + assertThat(getAllDataFilesFromTableDirectory(tableName)).containsExactlyInAnyOrderElementsOf(concat(initialFiles, updatedFiles)); - assertUpdate("DROP TABLE " + tableName); + assertUpdate("DROP TABLE " + tableName); + } } - @DataProvider - public Object[][] tableFormatVersion() + @Test() + public void testOptimizeTimePartitionedTable() { - return IntStream.rangeClosed(IcebergConfig.FORMAT_VERSION_SUPPORT_MIN, IcebergConfig.FORMAT_VERSION_SUPPORT_MAX).boxed() - .collect(DataProviders.toDataProvider()); + testOptimizeTimePartitionedTable("date", "%s", 15); + testOptimizeTimePartitionedTable("date", 
"day(%s)", 15); + testOptimizeTimePartitionedTable("date", "month(%s)", 3); + testOptimizeTimePartitionedTable("timestamp(6)", "day(%s)", 15); + testOptimizeTimePartitionedTable("timestamp(6)", "month(%s)", 3); + testOptimizeTimePartitionedTable("timestamp(6) with time zone", "day(%s)", 15); + testOptimizeTimePartitionedTable("timestamp(6) with time zone", "month(%s)", 3); } - @Test(dataProvider = "testOptimizeTimePartitionedTableDataProvider") - public void testOptimizeTimePartitionedTable(String dataType, String partitioningFormat, int expectedFilesAfterOptimize) + private void testOptimizeTimePartitionedTable(String dataType, String partitioningFormat, int expectedFilesAfterOptimize) { String tableName = "test_optimize_time_partitioned_" + (dataType + "_" + partitioningFormat).toLowerCase(Locale.ENGLISH).replaceAll("[^a-z0-9_]", ""); @@ -5098,20 +5108,6 @@ public void testOptimizeTimePartitionedTable(String dataType, String partitionin assertUpdate("DROP TABLE " + tableName); } - @DataProvider - public static Object[][] testOptimizeTimePartitionedTableDataProvider() - { - return new Object[][] { - {"date", "%s", 15}, - {"date", "day(%s)", 15}, - {"date", "month(%s)", 3}, - {"timestamp(6)", "day(%s)", 15}, - {"timestamp(6)", "month(%s)", 3}, - {"timestamp(6) with time zone", "day(%s)", 15}, - {"timestamp(6) with time zone", "month(%s)", 3}, - }; - } - @Test public void testOptimizeTableAfterDeleteWithFormatVersion2() { @@ -6398,8 +6394,22 @@ public void testMergeSimpleSelectPartitioned() assertUpdate("DROP TABLE " + targetTable); } - @Test(dataProvider = "partitionedAndBucketedProvider") - public void testMergeUpdateWithVariousLayouts(int writers, String partitioning) + @Test + public void testMergeUpdateWithVariousLayouts() + { + testMergeUpdateWithVariousLayouts(1, ""); + testMergeUpdateWithVariousLayouts(4, ""); + testMergeUpdateWithVariousLayouts(1, "WITH (partitioning = ARRAY['customer'])"); + testMergeUpdateWithVariousLayouts(4, "WITH (partitioning = 
ARRAY['customer'])"); + testMergeUpdateWithVariousLayouts(1, "WITH (partitioning = ARRAY['purchase'])"); + testMergeUpdateWithVariousLayouts(4, "WITH (partitioning = ARRAY['purchase'])"); + testMergeUpdateWithVariousLayouts(1, "WITH (partitioning = ARRAY['bucket(customer, 3)'])"); + testMergeUpdateWithVariousLayouts(4, "WITH (partitioning = ARRAY['bucket(customer, 3)'])"); + testMergeUpdateWithVariousLayouts(1, "WITH (partitioning = ARRAY['bucket(purchase, 4)'])"); + testMergeUpdateWithVariousLayouts(4, "WITH (partitioning = ARRAY['bucket(purchase, 4)'])"); + } + + private void testMergeUpdateWithVariousLayouts(int writers, String partitioning) { Session session = Session.builder(getSession()) .setSystemProperty(TASK_MIN_WRITER_COUNT, String.valueOf(writers)) @@ -6428,28 +6438,22 @@ public void testMergeUpdateWithVariousLayouts(int writers, String partitioning) assertUpdate("DROP TABLE " + targetTable); } - @DataProvider - public Object[][] partitionedAndBucketedProvider() + @Test + @Override + public void testMergeMultipleOperations() { - List writerCounts = ImmutableList.of(1, 4); - List partitioningTypes = ImmutableList.builder() - .add("") - .add("WITH (partitioning = ARRAY['customer'])") - .add("WITH (partitioning = ARRAY['purchase'])") - .add("WITH (partitioning = ARRAY['bucket(customer, 3)'])") - .add("WITH (partitioning = ARRAY['bucket(purchase, 4)'])") - .build(); - - List data = new ArrayList<>(); - for (int writers : writerCounts) { - for (String partitioning : partitioningTypes) { - data.add(new Object[] {writers, partitioning}); - } - } - return data.toArray(Object[][]::new); + testMergeMultipleOperations(1, ""); + testMergeMultipleOperations(4, ""); + testMergeMultipleOperations(1, "WITH (partitioning = ARRAY['customer'])"); + testMergeMultipleOperations(4, "WITH (partitioning = ARRAY['customer'])"); + testMergeMultipleOperations(1, "WITH (partitioning = ARRAY['purchase'])"); + testMergeMultipleOperations(4, "WITH (partitioning = ARRAY['purchase'])"); 
+ testMergeMultipleOperations(1, "WITH (partitioning = ARRAY['bucket(customer, 3)'])"); + testMergeMultipleOperations(4, "WITH (partitioning = ARRAY['bucket(customer, 3)'])"); + testMergeMultipleOperations(1, "WITH (partitioning = ARRAY['bucket(purchase, 4)'])"); + testMergeMultipleOperations(4, "WITH (partitioning = ARRAY['bucket(purchase, 4)'])"); } - @Test(dataProvider = "partitionedAndBucketedProvider") public void testMergeMultipleOperations(int writers, String partitioning) { Session session = Session.builder(getSession()) @@ -6541,8 +6545,18 @@ public void testMergeSimpleQueryPartitioned() assertUpdate("DROP TABLE " + targetTable); } - @Test(dataProvider = "partitionedBucketedFailure") - public void testMergeMultipleRowsMatchFails(String createTableSql) + @Test + @Override + public void testMergeMultipleRowsMatchFails() + { + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['bucket(customer, 3)'])"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (partitioning = ARRAY['address'])"); + testMergeMultipleRowsMatchFails("CREATE TABLE %s (purchases INT, customer VARCHAR, address VARCHAR) WITH (partitioning = ARRAY['address', 'customer'])"); + } + + private void testMergeMultipleRowsMatchFails(String createTableSql) { String targetTable = "merge_multiple_target_" + randomNameSuffix(); String sourceTable = "merge_multiple_source_" + randomNameSuffix(); @@ -6566,20 +6580,36 @@ public void testMergeMultipleRowsMatchFails(String createTableSql) assertUpdate("DROP TABLE " + targetTable); } - @DataProvider - public Object[][] partitionedBucketedFailure() - { - return new Object[][] { - 
{"CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)"}, - {"CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['bucket(customer, 3)'])"}, - {"CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])"}, - {"CREATE TABLE %s (customer VARCHAR, address VARCHAR, purchases INT) WITH (partitioning = ARRAY['address'])"}, - {"CREATE TABLE %s (purchases INT, customer VARCHAR, address VARCHAR) WITH (partitioning = ARRAY['address', 'customer'])"} - }; - } - - @Test(dataProvider = "targetAndSourceWithDifferentPartitioning") - public void testMergeWithDifferentPartitioning(String testDescription, String createTargetTableSql, String createSourceTableSql) + @Test + public void testMergeWithDifferentPartitioning() + { + testMergeWithDifferentPartitioning( + "target_partitioned_source_and_target_partitioned_and_bucketed", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])"); + testMergeWithDifferentPartitioning( + "target_flat_source_partitioned_by_customer", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)", + "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (partitioning = ARRAY['customer'])"); + testMergeWithDifferentPartitioning( + "target_partitioned_by_customer_source_flat", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)"); + testMergeWithDifferentPartitioning( + "target_bucketed_by_customer_source_flat", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['bucket(customer, 3)'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, 
address VARCHAR)"); + testMergeWithDifferentPartitioning( + "target_partitioned_source_partitioned_and_bucketed", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])"); + testMergeWithDifferentPartitioning( + "target_partitioned_target_partitioned_and_bucketed", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])", + "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])"); + } + + private void testMergeWithDifferentPartitioning(String testDescription, String createTargetTableSql, String createSourceTableSql) { String targetTable = format("%s_target_%s", testDescription, randomNameSuffix()); String sourceTable = format("%s_source_%s", testDescription, randomNameSuffix()); @@ -6604,43 +6634,6 @@ public void testMergeWithDifferentPartitioning(String testDescription, String cr assertUpdate("DROP TABLE " + targetTable); } - @DataProvider - public Object[][] targetAndSourceWithDifferentPartitioning() - { - return new Object[][] { - { - "target_partitioned_source_and_target_partitioned_and_bucketed", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])", - }, - { - "target_flat_source_partitioned_by_customer", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)", - "CREATE TABLE %s (purchases INT, address VARCHAR, customer VARCHAR) WITH (partitioning = ARRAY['customer'])" - }, - { - "target_partitioned_by_customer_source_flat", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = 
ARRAY['customer'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)", - }, - { - "target_bucketed_by_customer_source_flat", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['bucket(customer, 3)'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR)", - }, - { - "target_partitioned_source_partitioned_and_bucketed", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])", - }, - { - "target_partitioned_target_partitioned_and_bucketed", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['address', 'bucket(customer, 3)'])", - "CREATE TABLE %s (customer VARCHAR, purchases INT, address VARCHAR) WITH (partitioning = ARRAY['customer'])", - } - }; - } - @Override protected OptionalInt maxSchemaNameLength() { @@ -6980,7 +6973,8 @@ public void testDropCorruptedTableWithHiveRedirection() assertFalse(fileSystem.listFiles(tableLocation).hasNext(), "Table location should not exist"); } - @Test(timeOut = 10_000) + @Test + @Timeout(10) public void testNoRetryWhenMetadataFileInvalid() throws Exception { @@ -7299,38 +7293,41 @@ protected void verifyTableNameLengthFailurePermissible(Throwable e) assertThat(e).hasMessageMatching("Table name must be shorter than or equal to '128' characters but got .*"); } - @Test(dataProvider = "testTimestampPrecisionOnCreateTableAsSelect") - public void testTimestampPrecisionOnCreateTableAsSelect(TimestampPrecisionTestSetup setup) + @Test + public void testTimestampPrecisionOnCreateTableAsSelect() { - try (TestTable testTable = new TestTable( - getQueryRunner()::execute, - "test_coercion_show_create_table", - format("AS SELECT %s a", setup.sourceValueLiteral))) { - assertEquals(getColumnType(testTable.getName(), "a"), 
setup.newColumnType); - assertQuery( - format("SELECT * FROM %s", testTable.getName()), - format("VALUES (%s)", setup.newValueLiteral)); + for (TimestampPrecisionTestSetup setup : timestampPrecisionOnCreateTableAsSelectProvider()) { + try (TestTable testTable = new TestTable( + getQueryRunner()::execute, + "test_coercion_show_create_table", + format("AS SELECT %s a", setup.sourceValueLiteral))) { + assertEquals(getColumnType(testTable.getName(), "a"), setup.newColumnType); + assertQuery( + format("SELECT * FROM %s", testTable.getName()), + format("VALUES (%s)", setup.newValueLiteral)); + } } } - @Test(dataProvider = "testTimestampPrecisionOnCreateTableAsSelect") - public void testTimestampPrecisionOnCreateTableAsSelectWithNoData(TimestampPrecisionTestSetup setup) + @Test + public void testTimestampPrecisionOnCreateTableAsSelectWithNoData() { - try (TestTable testTable = new TestTable( - getQueryRunner()::execute, - "test_coercion_show_create_table", - format("AS SELECT %s a WITH NO DATA", setup.sourceValueLiteral))) { - assertEquals(getColumnType(testTable.getName(), "a"), setup.newColumnType); + for (TimestampPrecisionTestSetup setup : timestampPrecisionOnCreateTableAsSelectProvider()) { + try (TestTable testTable = new TestTable( + getQueryRunner()::execute, + "test_coercion_show_create_table", + format("AS SELECT %s a WITH NO DATA", setup.sourceValueLiteral))) { + assertEquals(getColumnType(testTable.getName(), "a"), setup.newColumnType); + } } } - @DataProvider(name = "testTimestampPrecisionOnCreateTableAsSelect") - public Object[][] timestampPrecisionOnCreateTableAsSelectProvider() + private List timestampPrecisionOnCreateTableAsSelectProvider() { return timestampPrecisionOnCreateTableAsSelectData().stream() .map(this::filterTimestampPrecisionOnCreateTableAsSelectProvider) .flatMap(Optional::stream) - .collect(toDataProvider()); + .collect(toList()); } protected Optional filterTimestampPrecisionOnCreateTableAsSelectProvider(TimestampPrecisionTestSetup setup) 
@@ -7384,8 +7381,37 @@ public TimestampPrecisionTestSetup withNewValueLiteral(String newValueLiteral) } } - @Test(dataProvider = "testTimePrecisionOnCreateTableAsSelect") - public void testTimePrecisionOnCreateTableAsSelect(String inputType, String tableType, String tableValue) + @Test + public void testTimePrecisionOnCreateTableAsSelect() + { + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00'", "time(6)", "TIME '00:00:00.000000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.9'", "time(6)", "TIME '00:00:00.900000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.56'", "time(6)", "TIME '00:00:00.560000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.123'", "time(6)", "TIME '00:00:00.123000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.4896'", "time(6)", "TIME '00:00:00.489600'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.89356'", "time(6)", "TIME '00:00:00.893560'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.123000'", "time(6)", "TIME '00:00:00.123000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.999'", "time(6)", "TIME '00:00:00.999000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.123456'", "time(6)", "TIME '00:00:00.123456'"); + testTimePrecisionOnCreateTableAsSelect("TIME '12:34:56.1'", "time(6)", "TIME '12:34:56.100000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '12:34:56.9'", "time(6)", "TIME '12:34:56.900000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '12:34:56.123'", "time(6)", "TIME '12:34:56.123000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '12:34:56.123000'", "time(6)", "TIME '12:34:56.123000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '12:34:56.999'", "time(6)", "TIME '12:34:56.999000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '12:34:56.123456'", "time(6)", "TIME '12:34:56.123456'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.1234561'", "time(6)", "TIME '00:00:00.123456'"); + 
testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.123456499'", "time(6)", "TIME '00:00:00.123456'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.123456499999'", "time(6)", "TIME '00:00:00.123456'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.1234565'", "time(6)", "TIME '00:00:00.123457'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.111222333444'", "time(6)", "TIME '00:00:00.111222'"); + testTimePrecisionOnCreateTableAsSelect("TIME '00:00:00.9999995'", "time(6)", "TIME '00:00:01.000000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '23:59:59.9999995'", "time(6)", "TIME '00:00:00.000000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '23:59:59.9999995'", "time(6)", "TIME '00:00:00.000000'"); + testTimePrecisionOnCreateTableAsSelect("TIME '23:59:59.999999499999'", "time(6)", "TIME '23:59:59.999999'"); + testTimePrecisionOnCreateTableAsSelect("TIME '23:59:59.9999994'", "time(6)", "TIME '23:59:59.999999'"); + } + + private void testTimePrecisionOnCreateTableAsSelect(String inputType, String tableType, String tableValue) { try (TestTable testTable = new TestTable( getQueryRunner()::execute, @@ -7398,8 +7424,37 @@ public void testTimePrecisionOnCreateTableAsSelect(String inputType, String tabl } } - @Test(dataProvider = "testTimePrecisionOnCreateTableAsSelect") - public void testTimePrecisionOnCreateTableAsSelectWithNoData(String inputType, String tableType, String ignored) + @Test + public void testTimePrecisionOnCreateTableAsSelectWithNoData() + { + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.9'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.56'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.123'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.4896'", "time(6)"); + 
testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.89356'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.123000'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.999'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.123456'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '12:34:56.1'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '12:34:56.9'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '12:34:56.123'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '12:34:56.123000'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '12:34:56.999'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '12:34:56.123456'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.1234561'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.123456499'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.123456499999'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.1234565'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.111222333444'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '00:00:00.9999995'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '23:59:59.9999995'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '23:59:59.9999995'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '23:59:59.999999499999'", "time(6)"); + testTimePrecisionOnCreateTableAsSelectWithNoData("TIME '23:59:59.9999994'", "time(6)"); + } + + private void testTimePrecisionOnCreateTableAsSelectWithNoData(String inputType, String tableType) { try (TestTable testTable = new TestTable( getQueryRunner()::execute, @@ -7409,37 +7464,6 @@ public void 
testTimePrecisionOnCreateTableAsSelectWithNoData(String inputType, S } } - @DataProvider(name = "testTimePrecisionOnCreateTableAsSelect") - public static Object[][] timePrecisionOnCreateTableAsSelectProvider() - { - return new Object[][] { - {"TIME '00:00:00'", "time(6)", "TIME '00:00:00.000000'"}, - {"TIME '00:00:00.9'", "time(6)", "TIME '00:00:00.900000'"}, - {"TIME '00:00:00.56'", "time(6)", "TIME '00:00:00.560000'"}, - {"TIME '00:00:00.123'", "time(6)", "TIME '00:00:00.123000'"}, - {"TIME '00:00:00.4896'", "time(6)", "TIME '00:00:00.489600'"}, - {"TIME '00:00:00.89356'", "time(6)", "TIME '00:00:00.893560'"}, - {"TIME '00:00:00.123000'", "time(6)", "TIME '00:00:00.123000'"}, - {"TIME '00:00:00.999'", "time(6)", "TIME '00:00:00.999000'"}, - {"TIME '00:00:00.123456'", "time(6)", "TIME '00:00:00.123456'"}, - {"TIME '12:34:56.1'", "time(6)", "TIME '12:34:56.100000'"}, - {"TIME '12:34:56.9'", "time(6)", "TIME '12:34:56.900000'"}, - {"TIME '12:34:56.123'", "time(6)", "TIME '12:34:56.123000'"}, - {"TIME '12:34:56.123000'", "time(6)", "TIME '12:34:56.123000'"}, - {"TIME '12:34:56.999'", "time(6)", "TIME '12:34:56.999000'"}, - {"TIME '12:34:56.123456'", "time(6)", "TIME '12:34:56.123456'"}, - {"TIME '00:00:00.1234561'", "time(6)", "TIME '00:00:00.123456'"}, - {"TIME '00:00:00.123456499'", "time(6)", "TIME '00:00:00.123456'"}, - {"TIME '00:00:00.123456499999'", "time(6)", "TIME '00:00:00.123456'"}, - {"TIME '00:00:00.1234565'", "time(6)", "TIME '00:00:00.123457'"}, - {"TIME '00:00:00.111222333444'", "time(6)", "TIME '00:00:00.111222'"}, - {"TIME '00:00:00.9999995'", "time(6)", "TIME '00:00:01.000000'"}, - {"TIME '23:59:59.9999995'", "time(6)", "TIME '00:00:00.000000'"}, - {"TIME '23:59:59.9999995'", "time(6)", "TIME '00:00:00.000000'"}, - {"TIME '23:59:59.999999499999'", "time(6)", "TIME '23:59:59.999999'"}, - {"TIME '23:59:59.9999994'", "time(6)", "TIME '23:59:59.999999'"}}; - } - @Override protected Optional filterSetColumnTypesDataProvider(SetColumnTypeSetup setup) { 
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergAvroConnectorTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergAvroConnectorTest.java index ff0fa9b71fa9..d85a8fe78043 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergAvroConnectorTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergAvroConnectorTest.java @@ -13,9 +13,10 @@ */ package io.trino.plugin.iceberg; -import org.testng.SkipException; +import org.junit.jupiter.api.Test; import static io.trino.plugin.iceberg.IcebergFileFormat.AVRO; +import static org.junit.jupiter.api.Assumptions.abort; public class TestIcebergAvroConnectorTest extends BaseIcebergConnectorTest @@ -37,16 +38,17 @@ protected boolean supportsRowGroupStatistics(String typeName) return false; } + @Test @Override public void testIncorrectIcebergFileSizes() { - throw new SkipException("Avro does not do tail reads"); + abort("Avro does not do tail reads"); } @Override protected boolean isFileSorted(String path, String sortColumnName) { - throw new SkipException("Unimplemented"); + return abort("Unimplemented"); } @Override diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMinioOrcConnectorTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMinioOrcConnectorTest.java index 0c08d0c6a8c6..5df3dbad90ce 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMinioOrcConnectorTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergMinioOrcConnectorTest.java @@ -19,7 +19,7 @@ import io.trino.testing.QueryRunner; import io.trino.testing.containers.Minio; import io.trino.testing.sql.TestTable; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.OutputStream; @@ -163,6 +163,7 @@ public void testTimeType() } } + @Test @Override public void 
testDropAmbiguousRowFieldCaseSensitivity() { diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergParquetConnectorTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergParquetConnectorTest.java index e05324d2fb89..7f41b139e425 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergParquetConnectorTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergParquetConnectorTest.java @@ -15,7 +15,7 @@ import io.trino.testing.MaterializedResult; import io.trino.testing.sql.TestTable; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.Optional; import java.util.stream.Collectors; @@ -80,6 +80,7 @@ protected Optional filterSetColumnTypesDataProvider(SetColum return super.filterSetColumnTypesDataProvider(setup); } + @Test @Override public void testDropAmbiguousRowFieldCaseSensitivity() { diff --git a/plugin/trino-ignite/src/test/java/io/trino/plugin/ignite/TestIgniteConnectorTest.java b/plugin/trino-ignite/src/test/java/io/trino/plugin/ignite/TestIgniteConnectorTest.java index 067781018014..f932948ccc2f 100644 --- a/plugin/trino-ignite/src/test/java/io/trino/plugin/ignite/TestIgniteConnectorTest.java +++ b/plugin/trino-ignite/src/test/java/io/trino/plugin/ignite/TestIgniteConnectorTest.java @@ -24,8 +24,7 @@ import io.trino.testing.sql.TestTable; import io.trino.testng.services.Flaky; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.List; import java.util.Locale; @@ -38,6 +37,7 @@ import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; public class TestIgniteConnectorTest extends BaseJdbcConnectorTest @@ -245,6 +245,7 @@ protected TestTable 
createTableWithDefaultColumns() "dummy_id varchar NOT NULL primary key)"); } + @Test @Override public void testShowCreateTable() { @@ -374,12 +375,12 @@ public void testAlterTableAddLongColumnName() super.testAlterTableAddLongColumnName(); } - @Test(dataProvider = "testColumnNameDataProvider") + @Test @Override @Flaky(issue = SCHEMA_CHANGE_OPERATION_FAIL_ISSUE, match = SCHEMA_CHANGE_OPERATION_FAIL_MATCH) - public void testAddAndDropColumnName(String columnName) + public void testAddAndDropColumnName() { - super.testAddAndDropColumnName(columnName); + super.testAddAndDropColumnName(); } @Override @@ -388,11 +389,12 @@ protected TestTable simpleTable() return new TestTable(onRemoteDatabase(), format("%s.simple_table", getSession().getSchema().orElseThrow()), "(col BIGINT, id bigint primary key)", ImmutableList.of("1, 1", "2, 2")); } + @Test @Override public void testCharVarcharComparison() { // Ignite will map char to varchar, skip - throw new SkipException("Ignite map char to varchar, skip test"); + abort("Ignite map char to varchar, skip test"); } @Override @@ -401,10 +403,11 @@ protected String errorMessageForInsertIntoNotNullColumn(String columnName) return format("Failed to insert data: Null value is not allowed for column '%s'", columnName.toUpperCase(Locale.ENGLISH)); } + @Test @Override public void testCharTrailingSpace() { - throw new SkipException("Ignite not support char trailing space"); + abort("Ignite not support char trailing space"); } @Override @@ -430,6 +433,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } + @Test @Override public void testDateYearOfEraPredicate() { diff --git a/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/TestKafkaConnectorTest.java b/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/TestKafkaConnectorTest.java index f687fcde1ccd..15374d28cf09 100644 --- a/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/TestKafkaConnectorTest.java +++ 
b/plugin/trino-kafka/src/test/java/io/trino/plugin/kafka/TestKafkaConnectorTest.java @@ -26,9 +26,7 @@ import io.trino.tpch.TpchTable; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.testng.SkipException; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; import java.util.List; @@ -37,7 +35,6 @@ import java.util.UUID; import java.util.stream.Stream; -import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.collect.ImmutableList.toImmutableList; import static com.google.common.collect.ImmutableMap.toImmutableMap; import static io.trino.plugin.kafka.encoder.json.format.DateTimeFormat.CUSTOM_DATE_TIME; @@ -57,7 +54,6 @@ import static io.trino.spi.type.TimestampType.TIMESTAMP_MILLIS; import static io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_TZ_MILLIS; import static io.trino.spi.type.VarcharType.createVarcharType; -import static io.trino.testing.DataProviders.toDataProvider; import static io.trino.testing.TestingConnectorBehavior.SUPPORTS_CREATE_TABLE_WITH_DATA; import static io.trino.testing.TestingNames.randomNameSuffix; import static java.lang.String.format; @@ -70,6 +66,7 @@ import static org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; @@ -199,7 +196,7 @@ public void testInternalFieldPrefix() @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Kafka connector does not support column default values"); + return abort("Kafka connector does not support column default values"); } @Test @@ -435,7 +432,7 @@ 
public void testInsertArray() // Override because the base test uses CREATE TABLE statement that is unsupported in Kafka connector assertThatThrownBy(() -> query("INSERT INTO " + TABLE_INSERT_ARRAY + " (a) VALUES (ARRAY[null])")) .hasMessage("Unsupported column type 'array(double)' for column 'a'"); - throw new SkipException("not supported"); + abort("not supported"); } @Test @@ -472,10 +469,11 @@ public void testInsertHighestUnicodeCharacter() .containsExactlyInAnyOrder("Hello", "hello测试􏿿world编码"); } + @Test @Override public void testInsertRowConcurrently() { - throw new SkipException("TODO Prepare a topic in Kafka and enable this test"); + abort("TODO Prepare a topic in Kafka and enable this test"); } @Test @@ -495,31 +493,26 @@ public void testKafkaHeaders() "VALUES ('bar'), (null), ('baz')"); } - @Test(dataProvider = "jsonDateTimeFormatsDataProvider") - public void testJsonDateTimeFormatsRoundTrip(JsonDateTimeTestCase testCase) + @Test + public void testJsonDateTimeFormatsRoundTrip() { - assertUpdate("INSERT into write_test." + testCase.getTopicName() + - " (" + testCase.getFieldNames() + ")" + - " VALUES " + testCase.getFieldValues(), 1); - for (JsonDateTimeTestCase.Field field : testCase.getFields()) { - Object actual = computeScalar("SELECT " + field.getFieldName() + " FROM write_test." + testCase.getTopicName()); - Object expected = computeScalar("SELECT " + field.getFieldValue()); - try { - assertEquals(actual, expected, "Equality assertion failed for field: " + field.getFieldName()); - } - catch (AssertionError e) { - throw new AssertionError(format("Equality assertion failed for field '%s'\n%s", field.getFieldName(), e.getMessage()), e); + for (JsonDateTimeTestCase testCase : jsonDateTimeFormatsData()) { + assertUpdate("INSERT into write_test." 
+ testCase.getTopicName() + + " (" + testCase.getFieldNames() + ")" + + " VALUES " + testCase.getFieldValues(), 1); + for (JsonDateTimeTestCase.Field field : testCase.getFields()) { + Object actual = computeScalar("SELECT " + field.getFieldName() + " FROM write_test." + testCase.getTopicName()); + Object expected = computeScalar("SELECT " + field.getFieldValue()); + try { + assertEquals(actual, expected, "Equality assertion failed for field: " + field.getFieldName()); + } + catch (AssertionError e) { + throw new AssertionError(format("Equality assertion failed for field '%s'\n%s", field.getFieldName(), e.getMessage()), e); + } } } } - @DataProvider - public static Object[][] jsonDateTimeFormatsDataProvider() - { - return jsonDateTimeFormatsData().stream() - .collect(toDataProvider()); - } - private static List jsonDateTimeFormatsData() { return ImmutableList.builder() @@ -698,100 +691,53 @@ public String getFieldValue() } } - @Test(dataProvider = "roundTripAllFormatsDataProvider") - public void testRoundTripAllFormats(RoundTripTestCase testCase) - { - assertUpdate("INSERT into write_test." + testCase.getTableName() + - " (" + testCase.getFieldNames() + ")" + - " VALUES " + testCase.getRowValues(), testCase.getNumRows()); - assertQuery("SELECT " + testCase.getFieldNames() + " FROM write_test." 
+ testCase.getTableName() + - " WHERE f_bigint > 1", - "VALUES " + testCase.getRowValues()); - } - - @DataProvider - public static Object[][] roundTripAllFormatsDataProvider() - { - return roundTripAllFormatsData().stream() - .collect(toDataProvider()); - } - - private static List roundTripAllFormatsData() + @Test + public void testRoundTripAllFormats() { - return ImmutableList.builder() - .add(new RoundTripTestCase( - "all_datatypes_avro", - ImmutableList.of("f_bigint", "f_float", "f_double", "f_boolean", "f_varchar"), - ImmutableList.of( - ImmutableList.of(100000, 999.999f, 1000.001, true, "'test'"), - ImmutableList.of(123456, -123.456f, 1234.123, false, "'abcd'")))) - .add(new RoundTripTestCase( - "all_datatypes_csv", - ImmutableList.of("f_bigint", "f_int", "f_smallint", "f_tinyint", "f_double", "f_boolean", "f_varchar"), - ImmutableList.of( - ImmutableList.of(100000, 1000, 100, 10, 1000.001, true, "'test'"), - ImmutableList.of(123456, 1234, 123, 12, 12345.123, false, "'abcd'")))) - .add(new RoundTripTestCase( - "all_datatypes_raw", - ImmutableList.of("kafka_key", "f_varchar", "f_bigint", "f_int", "f_smallint", "f_tinyint", "f_double", "f_boolean"), - ImmutableList.of( - ImmutableList.of(1, "'test'", 100000, 1000, 100, 10, 1000.001, true), - ImmutableList.of(1, "'abcd'", 123456, 1234, 123, 12, 12345.123, false)))) - .add(new RoundTripTestCase( - "all_datatypes_json", - ImmutableList.of("f_bigint", "f_int", "f_smallint", "f_tinyint", "f_double", "f_boolean", "f_varchar"), - ImmutableList.of( - ImmutableList.of(100000, 1000, 100, 10, 1000.001, true, "'test'"), - ImmutableList.of(123748, 1234, 123, 12, 12345.123, false, "'abcd'")))) - .build(); + testRoundTripAllFormats( + "all_datatypes_avro", + ImmutableList.of("f_bigint", "f_float", "f_double", "f_boolean", "f_varchar"), + ImmutableList.of( + ImmutableList.of(100000, 999.999f, 1000.001, true, "'test'"), + ImmutableList.of(123456, -123.456f, 1234.123, false, "'abcd'"))); + + testRoundTripAllFormats( + 
"all_datatypes_csv", + ImmutableList.of("f_bigint", "f_int", "f_smallint", "f_tinyint", "f_double", "f_boolean", "f_varchar"), + ImmutableList.of( + ImmutableList.of(100000, 1000, 100, 10, 1000.001, true, "'test'"), + ImmutableList.of(123456, 1234, 123, 12, 12345.123, false, "'abcd'"))); + + testRoundTripAllFormats( + "all_datatypes_raw", + ImmutableList.of("kafka_key", "f_varchar", "f_bigint", "f_int", "f_smallint", "f_tinyint", "f_double", "f_boolean"), + ImmutableList.of( + ImmutableList.of(1, "'test'", 100000, 1000, 100, 10, 1000.001, true), + ImmutableList.of(1, "'abcd'", 123456, 1234, 123, 12, 12345.123, false))); + + testRoundTripAllFormats( + "all_datatypes_json", + ImmutableList.of("f_bigint", "f_int", "f_smallint", "f_tinyint", "f_double", "f_boolean", "f_varchar"), + ImmutableList.of( + ImmutableList.of(100000, 1000, 100, 10, 1000.001, true, "'test'"), + ImmutableList.of(123748, 1234, 123, 12, 12345.123, false, "'abcd'"))); } - private static final class RoundTripTestCase + public void testRoundTripAllFormats(String tableName, List fieldNames, List> rowValues) { - private final String tableName; - private final List fieldNames; - private final List> rowValues; - private final int numRows; + String rows = rowValues.stream() + .map(row -> row.stream() + .map(Object::toString) + .collect(joining(", ", "(", ")"))) + .collect(joining(", ")); + String fields = String.join(",", fieldNames); - public RoundTripTestCase(String tableName, List fieldNames, List> rowValues) - { - for (List row : rowValues) { - checkArgument(fieldNames.size() == row.size(), "sizes of fieldNames and rowValues are not equal"); - } - this.tableName = requireNonNull(tableName, "tableName is null"); - this.fieldNames = ImmutableList.copyOf(fieldNames); - this.rowValues = ImmutableList.copyOf(rowValues); - this.numRows = this.rowValues.size(); - } - - public String getTableName() - { - return tableName; - } - - public String getFieldNames() - { - return String.join(", ", fieldNames); - } - 
- public String getRowValues() - { - String[] rows = new String[numRows]; - for (int i = 0; i < numRows; i++) { - rows[i] = rowValues.get(i).stream().map(Object::toString).collect(joining(", ", "(", ")")); - } - return String.join(", ", rows); - } - - public int getNumRows() - { - return numRows; - } + assertUpdate( + "INSERT into write_test." + tableName + " (" + fields + ") VALUES " + rows, + rowValues.size()); - @Override - public String toString() - { - return tableName; // for test case label in IDE - } + assertQuery( + "SELECT " + fields + " FROM write_test." + tableName + " WHERE f_bigint > 1", + "VALUES " + rows); } } diff --git a/plugin/trino-kudu/src/test/java/io/trino/plugin/kudu/TestKuduConnectorTest.java b/plugin/trino-kudu/src/test/java/io/trino/plugin/kudu/TestKuduConnectorTest.java index e0deac697850..29677ab90a77 100644 --- a/plugin/trino-kudu/src/test/java/io/trino/plugin/kudu/TestKuduConnectorTest.java +++ b/plugin/trino-kudu/src/test/java/io/trino/plugin/kudu/TestKuduConnectorTest.java @@ -20,8 +20,8 @@ import io.trino.testing.QueryRunner; import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.TestTable; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; -import org.testng.SkipException; import java.util.Optional; import java.util.OptionalInt; @@ -37,6 +37,7 @@ import static java.util.Locale.ENGLISH; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; @@ -155,22 +156,18 @@ public void testRenameTableToUnqualifiedPreservesSchema() .hasMessage("Creating schema in Kudu connector not allowed if schema emulation is disabled."); } + @Test + @Disabled @Override - public void testAddAndDropColumnName(String columnName) + public void 
testAddAndDropColumnName() { - // TODO: Enable this test - assertThatThrownBy(() -> super.testAddAndDropColumnName(columnName)) - .hasMessage("Table partitioning must be specified using setRangePartitionColumns or addHashPartitions"); - throw new SkipException("TODO"); } + @Test + @Disabled @Override - public void testRenameColumnName(String columnName) + public void testRenameColumnName() { - // TODO: Enable this test - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - .hasMessage("Table partitioning must be specified using setRangePartitionColumns or addHashPartitions"); - throw new SkipException("TODO"); } @Override @@ -306,13 +303,14 @@ protected void testColumnName(String columnName, boolean delimited) } } + @Test @Override public void testAddNotNullColumnToEmptyTable() { // TODO: Enable this test assertThatThrownBy(super::testAddNotNullColumnToEmptyTable) .hasMessage("Table partitioning must be specified using setRangePartitionColumns or addHashPartitions"); - throw new SkipException("TODO"); + abort("TODO"); } @Test @@ -420,6 +418,7 @@ public void testCreateTable() //assertFalse(getQueryRunner().tableExists(getSession(), tableNameLike)); } + @Test @Override public void testCreateTableWithLongTableName() { @@ -443,6 +442,7 @@ public void testCreateTableWithLongTableName() assertFalse(getQueryRunner().tableExists(getSession(), validTableName)); } + @Test @Override public void testCreateTableWithLongColumnName() { @@ -469,6 +469,7 @@ public void testCreateTableWithLongColumnName() assertFalse(getQueryRunner().tableExists(getSession(), tableName)); } + @Test @Override public void testCreateTableWithColumnComment() { @@ -485,6 +486,7 @@ public void testCreateTableWithColumnComment() assertUpdate("DROP TABLE IF EXISTS " + tableName); } + @Test @Override public void testDropTable() { @@ -600,6 +602,7 @@ public void testInsertHighestUnicodeCharacter() } } + @Test @Override public void testInsertNegativeDate() { @@ -690,7 +693,8 @@ protected 
TestTable createTableWithOneIntegerColumn(String namePrefix) * This test fails intermittently because Kudu doesn't have strong enough * semantics to support writing from multiple threads. */ - @org.testng.annotations.Test(enabled = false) + @Test + @Disabled @Override public void testUpdateWithPredicates() { @@ -712,6 +716,7 @@ public void testUpdateWithPredicates() * This test fails intermittently because Kudu doesn't have strong enough * semantics to support writing from multiple threads. */ + @Test @Override public void testUpdateAllValues() { @@ -723,12 +728,14 @@ public void testUpdateAllValues() }); } + @Test @Override public void testWrittenStats() { // TODO Kudu connector supports CTAS and inserts, but the test would fail } + @Test @Override public void testReadMetadataWithRelationsConcurrentModifications() { @@ -740,7 +747,7 @@ public void testReadMetadataWithRelationsConcurrentModifications() // TODO (https://github.com/trinodb/trino/issues/12974): shouldn't fail assertThat(expected) .hasMessageMatching(".* table .* was deleted: Table deleted at .* UTC"); - throw new SkipException("to be fixed"); + abort("to be fixed"); } } @@ -777,15 +784,17 @@ public void testDateYearOfEraPredicate() .hasStackTraceContaining("Cannot apply operator: varchar = date"); } + @Test @Override public void testVarcharCastToDateInPredicate() { assertThatThrownBy(super::testVarcharCastToDateInPredicate) .hasStackTraceContaining("Table partitioning must be specified using setRangePartitionColumns or addHashPartitions"); - throw new SkipException("TODO: implement the test for Kudu"); + abort("TODO: implement the test for Kudu"); } + @Test @Override public void testCharVarcharComparison() { @@ -795,7 +804,7 @@ public void testCharVarcharComparison() .hasMessageContaining("Actual rows") .hasMessageContaining("Expected rows"); - throw new SkipException("TODO"); + abort("TODO"); } @Test @@ -947,7 +956,8 @@ public void testRowLevelDelete() * This test fails intermittently because Kudu 
doesn't have strong enough * semantics to support writing from multiple threads. */ - @org.testng.annotations.Test(enabled = false) + @Test + @Disabled @Override public void testUpdate() { @@ -963,7 +973,8 @@ public void testUpdate() * This test fails intermittently because Kudu doesn't have strong enough * semantics to support writing from multiple threads. */ - @org.testng.annotations.Test(enabled = false) + @Test + @Disabled @Override public void testRowLevelUpdate() { @@ -984,11 +995,12 @@ public void testRowLevelUpdate() }); } + @Test @Override public void testUpdateRowConcurrently() throws Exception { - throw new SkipException("Kudu doesn't support concurrent update of different columns in a row"); + abort("Kudu doesn't support concurrent update of different columns in a row"); } @Test @@ -1006,7 +1018,7 @@ public void testCreateTableWithTableComment() } @Override - public void testCreateTableWithTableCommentSpecialCharacter(String comment) + protected void testCreateTableWithTableCommentSpecialCharacter(String comment) { // TODO Remove this overriding test once kudu connector can create tables with default partitions try (TestTable table = new TestTable(getQueryRunner()::execute, @@ -1039,10 +1051,11 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } + @Test @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Kudu connector does not support column default values"); + return abort("Kudu connector does not support column default values"); } @Override diff --git a/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/BaseMariaDbConnectorTest.java b/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/BaseMariaDbConnectorTest.java index d2ed5a68ed43..347ebe78610e 100644 --- a/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/BaseMariaDbConnectorTest.java +++ b/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/BaseMariaDbConnectorTest.java @@ 
-18,7 +18,7 @@ import io.trino.testing.MaterializedResult; import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.TestTable; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.Optional; import java.util.OptionalInt; @@ -84,7 +84,7 @@ protected TestTable createTableWithUnsupportedColumn() "(one bigint, two decimal(50,0), three varchar(10))"); } - @org.junit.jupiter.api.Test + @Test @Override public void testShowColumns() { @@ -181,6 +181,7 @@ public void testColumnComment() assertUpdate("DROP TABLE test_column_comment"); } + @Test @Override public void testAddNotNullColumn() { @@ -288,6 +289,7 @@ public void testInsertIntoNotNullColumn() } } + @Test @Override public void testNativeQueryCreateStatement() { @@ -298,6 +300,7 @@ public void testNativeQueryCreateStatement() assertFalse(getQueryRunner().tableExists(getSession(), "numbers")); } + @Test @Override public void testNativeQueryInsertStatementTableExists() { diff --git a/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/TestMariaDbConnectorTest.java b/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/TestMariaDbConnectorTest.java index 394a325f6e1e..42a7bf7432e8 100644 --- a/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/TestMariaDbConnectorTest.java +++ b/plugin/trino-mariadb/src/test/java/io/trino/plugin/mariadb/TestMariaDbConnectorTest.java @@ -16,6 +16,7 @@ import com.google.common.collect.ImmutableMap; import io.trino.testing.QueryRunner; import io.trino.testing.sql.SqlExecutor; +import org.junit.jupiter.api.Test; import static io.trino.plugin.mariadb.MariaDbQueryRunner.createMariaDbQueryRunner; import static org.assertj.core.api.Assertions.assertThatThrownBy; @@ -37,6 +38,7 @@ protected SqlExecutor onRemoteDatabase() return server::execute; } + @Test @Override public void testRenameColumn() { @@ -44,13 +46,17 @@ public void testRenameColumn() .hasMessageContaining("Rename column not supported for the MariaDB server 
version"); } + @Test @Override - public void testRenameColumnName(String columnName) + public void testRenameColumnName() { - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - .hasMessageContaining("Rename column not supported for the MariaDB server version"); + for (String columnName : testColumnNameDataProvider()) { + assertThatThrownBy(() -> testRenameColumnName(columnName, requiresDelimiting(columnName))) + .hasMessageContaining("Rename column not supported for the MariaDB server version"); + } } + @Test @Override public void testAlterTableRenameColumnToLongName() { diff --git a/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryConnectorTest.java b/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryConnectorTest.java index 45fd3e285161..50fb43f610b2 100644 --- a/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryConnectorTest.java +++ b/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryConnectorTest.java @@ -32,8 +32,8 @@ import io.trino.testng.services.Flaky; import io.trino.tpch.TpchTable; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.util.List; @@ -44,6 +44,7 @@ import static io.trino.sql.planner.OptimizerConfig.JoinDistributionType.BROADCAST; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; @@ -107,7 +108,7 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Memory connector does not support column default values"); + return abort("Memory 
connector does not support column default values"); } @Test @@ -187,7 +188,8 @@ private Metrics collectCustomMetrics(String sql) .reduce(Metrics.EMPTY, Metrics::mergeWith); } - @Test(timeOut = 30_000) + @Test + @Timeout(30) public void testPhysicalInputPositions() { MaterializedResultWithQueryId result = getDistributedQueryRunner().executeWithQueryId( @@ -202,58 +204,67 @@ public void testPhysicalInputPositions() assertEquals(probeStats.getPhysicalInputPositions(), LINEITEM_COUNT); } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testJoinDynamicFilteringNone(JoinDistributionType joinDistributionType) + @Test + @Timeout(30) + public void testJoinDynamicFilteringNone() { - // Probe-side is not scanned at all, due to dynamic filtering: - assertDynamicFiltering( - "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey AND orders.totalprice < 0", - noJoinReordering(joinDistributionType), - 0, - 0, ORDERS_COUNT); + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + // Probe-side is not scanned at all, due to dynamic filtering: + assertDynamicFiltering( + "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey AND orders.totalprice < 0", + noJoinReordering(joinDistributionType), + 0, + 0, ORDERS_COUNT); + } } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testJoinLargeBuildSideDynamicFiltering(JoinDistributionType joinDistributionType) - { - @Language("SQL") String sql = "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey and orders.custkey BETWEEN 300 AND 700"; - int expectedRowCount = 15793; - // Probe-side is fully scanned because the build-side is too large for dynamic filtering: - assertDynamicFiltering( - sql, - noJoinReordering(joinDistributionType), - expectedRowCount, - LINEITEM_COUNT, ORDERS_COUNT); - // Probe-side is partially scanned because we extract min/max from large build-side for dynamic 
filtering - assertDynamicFiltering( - sql, - withLargeDynamicFilters(joinDistributionType), - expectedRowCount, - 60139, ORDERS_COUNT); + @Test + @Timeout(30) + public void testJoinLargeBuildSideDynamicFiltering() + { + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + @Language("SQL") String sql = "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey and orders.custkey BETWEEN 300 AND 700"; + int expectedRowCount = 15793; + // Probe-side is fully scanned because the build-side is too large for dynamic filtering: + assertDynamicFiltering( + sql, + noJoinReordering(joinDistributionType), + expectedRowCount, + LINEITEM_COUNT, ORDERS_COUNT); + // Probe-side is partially scanned because we extract min/max from large build-side for dynamic filtering + assertDynamicFiltering( + sql, + withLargeDynamicFilters(joinDistributionType), + expectedRowCount, + 60139, ORDERS_COUNT); + } } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testJoinDynamicFilteringSingleValue(JoinDistributionType joinDistributionType) + @Test + @Timeout(30) + public void testJoinDynamicFilteringSingleValue() { - assertThat(computeScalar("SELECT orderkey FROM orders WHERE comment = 'nstructions sleep furiously among '")).isEqualTo(1L); - assertThat(computeScalar("SELECT COUNT() FROM lineitem WHERE orderkey = 1")).isEqualTo(6L); + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + assertThat(computeScalar("SELECT orderkey FROM orders WHERE comment = 'nstructions sleep furiously among '")).isEqualTo(1L); + assertThat(computeScalar("SELECT COUNT() FROM lineitem WHERE orderkey = 1")).isEqualTo(6L); - assertThat(computeScalar("SELECT partkey FROM part WHERE comment = 'onic deposits'")).isEqualTo(1552L); - assertThat(computeScalar("SELECT COUNT() FROM lineitem WHERE partkey = 1552")).isEqualTo(39L); + assertThat(computeScalar("SELECT partkey FROM part WHERE comment = 'onic 
deposits'")).isEqualTo(1552L); + assertThat(computeScalar("SELECT COUNT() FROM lineitem WHERE partkey = 1552")).isEqualTo(39L); - // Join lineitem with a single row of orders - assertDynamicFiltering( - "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey AND orders.comment = 'nstructions sleep furiously among '", - noJoinReordering(joinDistributionType), - 6, - 6, ORDERS_COUNT); + // Join lineitem with a single row of orders + assertDynamicFiltering( + "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey AND orders.comment = 'nstructions sleep furiously among '", + noJoinReordering(joinDistributionType), + 6, + 6, ORDERS_COUNT); - // Join lineitem with a single row of part - assertDynamicFiltering( - "SELECT l.comment FROM lineitem l, part p WHERE p.partkey = l.partkey AND p.comment = 'onic deposits'", - noJoinReordering(joinDistributionType), - 39, - 39, PART_COUNT); + // Join lineitem with a single row of part + assertDynamicFiltering( + "SELECT l.comment FROM lineitem l, part p WHERE p.partkey = l.partkey AND p.comment = 'onic deposits'", + noJoinReordering(joinDistributionType), + 39, + 39, PART_COUNT); + } } @Test @@ -268,81 +279,96 @@ public void testJoinDynamicFilteringImplicitCoercion() 6, ORDERS_COUNT); } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testJoinDynamicFilteringBlockProbeSide(JoinDistributionType joinDistributionType) - { - // Wait for both build sides to finish before starting the scan of 'lineitem' table (should be very selective given the dynamic filters). 
- assertDynamicFiltering( - "SELECT l.comment" + - " FROM lineitem l, part p, orders o" + - " WHERE l.orderkey = o.orderkey AND o.comment = 'nstructions sleep furiously among '" + - " AND p.partkey = l.partkey AND p.comment = 'onic deposits'", - noJoinReordering(joinDistributionType), - 1, - 1, PART_COUNT, ORDERS_COUNT); + @Test + @Timeout(30) + public void testJoinDynamicFilteringBlockProbeSide() + { + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + // Wait for both build sides to finish before starting the scan of 'lineitem' table (should be very selective given the dynamic filters). + assertDynamicFiltering( + "SELECT l.comment" + + " FROM lineitem l, part p, orders o" + + " WHERE l.orderkey = o.orderkey AND o.comment = 'nstructions sleep furiously among '" + + " AND p.partkey = l.partkey AND p.comment = 'onic deposits'", + noJoinReordering(joinDistributionType), + 1, + 1, PART_COUNT, ORDERS_COUNT); + } } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testSemiJoinDynamicFilteringNone(JoinDistributionType joinDistributionType) + @Test + @Timeout(30) + public void testSemiJoinDynamicFilteringNone() { - // Probe-side is not scanned at all, due to dynamic filtering: - assertDynamicFiltering( - "SELECT * FROM lineitem WHERE lineitem.orderkey IN (SELECT orders.orderkey FROM orders WHERE orders.totalprice < 0)", - noJoinReordering(joinDistributionType), - 0, - 0, ORDERS_COUNT); + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + // Probe-side is not scanned at all, due to dynamic filtering: + assertDynamicFiltering( + "SELECT * FROM lineitem WHERE lineitem.orderkey IN (SELECT orders.orderkey FROM orders WHERE orders.totalprice < 0)", + noJoinReordering(joinDistributionType), + 0, + 0, ORDERS_COUNT); + } } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testSemiJoinLargeBuildSideDynamicFiltering(JoinDistributionType joinDistributionType) 
- { - // Probe-side is fully scanned because the build-side is too large for dynamic filtering: - @Language("SQL") String sql = "SELECT * FROM lineitem WHERE lineitem.orderkey IN " + - "(SELECT orders.orderkey FROM orders WHERE orders.custkey BETWEEN 300 AND 700)"; - int expectedRowCount = 15793; - // Probe-side is fully scanned because the build-side is too large for dynamic filtering: - assertDynamicFiltering( - sql, - noJoinReordering(joinDistributionType), - expectedRowCount, - LINEITEM_COUNT, ORDERS_COUNT); - // Probe-side is partially scanned because we extract min/max from large build-side for dynamic filtering - assertDynamicFiltering( - sql, - withLargeDynamicFilters(joinDistributionType), - expectedRowCount, - 60139, ORDERS_COUNT); + @Test + @Timeout(30) + public void testSemiJoinLargeBuildSideDynamicFiltering() + { + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + // Probe-side is fully scanned because the build-side is too large for dynamic filtering: + @Language("SQL") String sql = "SELECT * FROM lineitem WHERE lineitem.orderkey IN " + + "(SELECT orders.orderkey FROM orders WHERE orders.custkey BETWEEN 300 AND 700)"; + int expectedRowCount = 15793; + // Probe-side is fully scanned because the build-side is too large for dynamic filtering: + assertDynamicFiltering( + sql, + noJoinReordering(joinDistributionType), + expectedRowCount, + LINEITEM_COUNT, ORDERS_COUNT); + // Probe-side is partially scanned because we extract min/max from large build-side for dynamic filtering + assertDynamicFiltering( + sql, + withLargeDynamicFilters(joinDistributionType), + expectedRowCount, + 60139, ORDERS_COUNT); + } } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testSemiJoinDynamicFilteringSingleValue(JoinDistributionType joinDistributionType) - { - // Join lineitem with a single row of orders - assertDynamicFiltering( - "SELECT * FROM lineitem WHERE lineitem.orderkey IN (SELECT orders.orderkey FROM 
orders WHERE orders.comment = 'nstructions sleep furiously among ')", - noJoinReordering(joinDistributionType), - 6, - 6, ORDERS_COUNT); - - // Join lineitem with a single row of part - assertDynamicFiltering( - "SELECT l.comment FROM lineitem l WHERE l.partkey IN (SELECT p.partkey FROM part p WHERE p.comment = 'onic deposits')", - noJoinReordering(joinDistributionType), - 39, - 39, PART_COUNT); + @Test + @Timeout(30) + public void testSemiJoinDynamicFilteringSingleValue() + { + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + // Join lineitem with a single row of orders + assertDynamicFiltering( + "SELECT * FROM lineitem WHERE lineitem.orderkey IN (SELECT orders.orderkey FROM orders WHERE orders.comment = 'nstructions sleep furiously among ')", + noJoinReordering(joinDistributionType), + 6, + 6, ORDERS_COUNT); + + // Join lineitem with a single row of part + assertDynamicFiltering( + "SELECT l.comment FROM lineitem l WHERE l.partkey IN (SELECT p.partkey FROM part p WHERE p.comment = 'onic deposits')", + noJoinReordering(joinDistributionType), + 39, + 39, PART_COUNT); + } } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testSemiJoinDynamicFilteringBlockProbeSide(JoinDistributionType joinDistributionType) + @Test + @Timeout(30) + public void testSemiJoinDynamicFilteringBlockProbeSide() { - // Wait for both build sides to finish before starting the scan of 'lineitem' table (should be very selective given the dynamic filters). 
- assertDynamicFiltering( - "SELECT t.comment FROM " + - "(SELECT * FROM lineitem l WHERE l.orderkey IN (SELECT o.orderkey FROM orders o WHERE o.comment = 'nstructions sleep furiously among ')) t " + - "WHERE t.partkey IN (SELECT p.partkey FROM part p WHERE p.comment = 'onic deposits')", - noJoinReordering(joinDistributionType), - 1, - 1, ORDERS_COUNT, PART_COUNT); + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + // Wait for both build sides to finish before starting the scan of 'lineitem' table (should be very selective given the dynamic filters). + assertDynamicFiltering( + "SELECT t.comment FROM " + + "(SELECT * FROM lineitem l WHERE l.orderkey IN (SELECT o.orderkey FROM orders o WHERE o.comment = 'nstructions sleep furiously among ')) t " + + "WHERE t.partkey IN (SELECT p.partkey FROM part p WHERE p.comment = 'onic deposits')", + noJoinReordering(joinDistributionType), + 1, + 1, ORDERS_COUNT, PART_COUNT); + } } @Test @@ -422,23 +448,26 @@ public void testCrossJoinLargeBuildSideDynamicFiltering() ORDERS_COUNT, CUSTOMER_COUNT); } - @Test(timeOut = 30_000, dataProvider = "joinDistributionTypes") - public void testJoinDynamicFilteringMultiJoin(JoinDistributionType joinDistributionType) - { - assertUpdate("DROP TABLE IF EXISTS t0"); - assertUpdate("DROP TABLE IF EXISTS t1"); - assertUpdate("DROP TABLE IF EXISTS t2"); - assertUpdate("CREATE TABLE t0 (k0 integer, v0 real)"); - assertUpdate("CREATE TABLE t1 (k1 integer, v1 real)"); - assertUpdate("CREATE TABLE t2 (k2 integer, v2 real)"); - assertUpdate("INSERT INTO t0 VALUES (1, 1.0)", 1); - assertUpdate("INSERT INTO t1 VALUES (1, 2.0)", 1); - assertUpdate("INSERT INTO t2 VALUES (1, 3.0)", 1); - - assertQuery( - noJoinReordering(joinDistributionType), - "SELECT k0, k1, k2 FROM t0, t1, t2 WHERE (k0 = k1) AND (k0 = k2) AND (v0 + v1 = v2)", - "SELECT 1, 1, 1"); + @Test + @Timeout(30) + public void testJoinDynamicFilteringMultiJoin() + { + for (JoinDistributionType joinDistributionType 
: JoinDistributionType.values()) { + assertUpdate("DROP TABLE IF EXISTS t0"); + assertUpdate("DROP TABLE IF EXISTS t1"); + assertUpdate("DROP TABLE IF EXISTS t2"); + assertUpdate("CREATE TABLE t0 (k0 integer, v0 real)"); + assertUpdate("CREATE TABLE t1 (k1 integer, v1 real)"); + assertUpdate("CREATE TABLE t2 (k2 integer, v2 real)"); + assertUpdate("INSERT INTO t0 VALUES (1, 1.0)", 1); + assertUpdate("INSERT INTO t1 VALUES (1, 2.0)", 1); + assertUpdate("INSERT INTO t2 VALUES (1, 3.0)", 1); + + assertQuery( + noJoinReordering(joinDistributionType), + "SELECT k0, k1, k2 FROM t0, t1, t2 WHERE (k0 = k1) AND (k0 = k2) AND (v0 + v1 = v2)", + "SELECT 1, 1, 1"); + } } private void assertDynamicFiltering(@Language("SQL") String selectQuery, Session session, int expectedRowCount, int... expectedOperatorRowsRead) diff --git a/plugin/trino-mongodb/src/test/java/io/trino/plugin/mongodb/TestMongoConnectorTest.java b/plugin/trino-mongodb/src/test/java/io/trino/plugin/mongodb/TestMongoConnectorTest.java index a6dcbbfdf206..675f3d5eedde 100644 --- a/plugin/trino-mongodb/src/test/java/io/trino/plugin/mongodb/TestMongoConnectorTest.java +++ b/plugin/trino-mongodb/src/test/java/io/trino/plugin/mongodb/TestMongoConnectorTest.java @@ -35,11 +35,10 @@ import org.bson.Document; import org.bson.types.Decimal128; import org.bson.types.ObjectId; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.math.BigDecimal; import java.time.LocalDate; @@ -61,10 +60,13 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static 
org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; +@TestInstance(PER_CLASS) public class TestMongoConnectorTest extends BaseConnectorTest { @@ -80,13 +82,13 @@ protected QueryRunner createQueryRunner() return createMongoQueryRunner(server, ImmutableMap.of(), REQUIRED_TPCH_TABLES); } - @BeforeClass + @BeforeAll public void initTestSchema() { assertUpdate("CREATE SCHEMA IF NOT EXISTS test"); } - @AfterClass(alwaysRun = true) + @AfterAll public final void destroy() { server.close(); @@ -117,21 +119,23 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("MongoDB connector does not support column default values"); + return abort("MongoDB connector does not support column default values"); } - @Test(dataProvider = "testColumnNameDataProvider") + @Test @Override - public void testColumnName(String columnName) + public void testColumnName() { - if (columnName.equals("a.dot")) { - assertThatThrownBy(() -> super.testColumnName(columnName)) - .isInstanceOf(RuntimeException.class) - .hasMessage("Column name must not contain '$' or '.' for INSERT: " + columnName); - throw new SkipException("Insert would fail"); - } + for (String columnName : testColumnNameDataProvider()) { + if (columnName.equals("a.dot")) { + assertThatThrownBy(() -> testColumnName(columnName, requiresDelimiting(columnName))) + .isInstanceOf(RuntimeException.class) + .hasMessage("Column name must not contain '$' or '.' 
for INSERT: " + columnName); + continue; + } - super.testColumnName(columnName); + testColumnName(columnName, requiresDelimiting(columnName)); + } } @Test @@ -157,8 +161,24 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } - @Test(dataProvider = "guessFieldTypesProvider") - public void testGuessFieldTypes(String mongoValue, String trinoValue) + @Test + public void testGuessFieldTypes() + { + testGuessFieldTypes("true", "true"); // boolean -> boolean + testGuessFieldTypes("2147483647", "bigint '2147483647'"); // int32 -> bigint + testGuessFieldTypes("{\"$numberLong\": \"9223372036854775807\"}", "9223372036854775807"); // int64 -> bigint + testGuessFieldTypes("1.23", "double '1.23'"); // double -> double + testGuessFieldTypes("{\"$date\": \"1970-01-01T00:00:00.000Z\"}", "timestamp '1970-01-01 00:00:00.000'"); // date -> timestamp(3) + testGuessFieldTypes("'String type'", "varchar 'String type'"); // string -> varchar + testGuessFieldTypes("{$binary: \"\",\"$type\": \"0\"}", "to_utf8('')"); // binary -> varbinary + testGuessFieldTypes("{\"$oid\": \"6216f0c6c432d45190f25e7c\"}", "ObjectId('6216f0c6c432d45190f25e7c')"); // objectid -> objectid + testGuessFieldTypes("[1]", "array[bigint '1']"); // array with single type -> array + testGuessFieldTypes("{\"field\": \"object\"}", "CAST(row('object') AS row(field varchar))"); // object -> row + testGuessFieldTypes("[9, \"test\"]", "CAST(row(9, 'test') AS row(_pos1 bigint, _pos2 varchar))"); // array with multiple types -> row + testGuessFieldTypes("{\"$ref\":\"test_ref\",\"$id\":ObjectId(\"4e3f33de6266b5845052c02c\"),\"$db\":\"test_db\"}", "CAST(row('test_db', 'test_ref', ObjectId('4e3f33de6266b5845052c02c')) AS row(databasename varchar, collectionname varchar, id ObjectId))"); // dbref -> row + } + + private void testGuessFieldTypes(String mongoValue, String trinoValue) { String tableName = "test_guess_field_type_" + randomNameSuffix(); Document
document = Document.parse(format("{\"test\":%s}", mongoValue)); @@ -172,25 +192,6 @@ public void testGuessFieldTypes(String mongoValue, String trinoValue) assertUpdate("DROP TABLE test." + tableName); } - @DataProvider - public Object[][] guessFieldTypesProvider() - { - return new Object[][] { - {"true", "true"}, // boolean -> boolean - {"2147483647", "bigint '2147483647'"}, // int32 -> bigint - {"{\"$numberLong\": \"9223372036854775807\"}", "9223372036854775807"}, // int64 -> bigint - {"1.23", "double '1.23'"}, // double -> double - {"{\"$date\": \"1970-01-01T00:00:00.000Z\"}", "timestamp '1970-01-01 00:00:00.000'"}, // date -> timestamp(3) - {"'String type'", "varchar 'String type'"}, // string -> varchar - {"{$binary: \"\",\"$type\": \"0\"}", "to_utf8('')"}, // binary -> varbinary - {"{\"$oid\": \"6216f0c6c432d45190f25e7c\"}", "ObjectId('6216f0c6c432d45190f25e7c')"}, // objectid -> objectid - {"[1]", "array[bigint '1']"}, // array with single type -> array - {"{\"field\": \"object\"}", "CAST(row('object') AS row(field varchar))"}, // object -> row - {"[9, \"test\"]", "CAST(row(9, 'test') AS row(_pos1 bigint, _pos2 varchar))"}, // array with multiple types -> row - {"{\"$ref\":\"test_ref\",\"$id\":ObjectId(\"4e3f33de6266b5845052c02c\"),\"$db\":\"test_db\"}", "CAST(row('test_db', 'test_ref', ObjectId('4e3f33de6266b5845052c02c')) AS row(databasename varchar, collectionname varchar, id ObjectId))"}, // dbref -> row - }; - } - @Test public void createTableWithEveryType() { @@ -276,6 +277,7 @@ public void testInsertWithEveryType() assertFalse(getQueryRunner().tableExists(getSession(), tableName)); } + @Test @Override public void testDeleteWithComplexPredicate() { @@ -283,6 +285,7 @@ public void testDeleteWithComplexPredicate() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testDeleteWithLike() { @@ -290,6 +293,7 @@ public void testDeleteWithLike() .hasStackTraceContaining("TrinoException: " + 
MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testDeleteWithSemiJoin() { @@ -297,6 +301,7 @@ public void testDeleteWithSemiJoin() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testDeleteWithSubquery() { @@ -304,6 +309,7 @@ public void testDeleteWithSubquery() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } + @Test @Override public void testExplainAnalyzeWithDeleteWithSubquery() { @@ -311,8 +317,26 @@ public void testExplainAnalyzeWithDeleteWithSubquery() .hasStackTraceContaining("TrinoException: " + MODIFYING_ROWS_MESSAGE); } - @Test(dataProvider = "predicatePushdownProvider") - public void testPredicatePushdown(String value) + @Test + public void testPredicatePushdown() + { + testPredicatePushdown("true"); + testPredicatePushdown("tinyint '1'"); + testPredicatePushdown("smallint '2'"); + testPredicatePushdown("integer '3'"); + testPredicatePushdown("bigint '4'"); + testPredicatePushdown("decimal '3.14'"); + testPredicatePushdown("decimal '1234567890.123456789'"); + testPredicatePushdown("'test'"); + testPredicatePushdown("char 'test'"); + testPredicatePushdown("objectid('6216f0c6c432d45190f25e7c')"); + testPredicatePushdown("date '1970-01-01'"); + testPredicatePushdown("time '00:00:00.000'"); + testPredicatePushdown("timestamp '1970-01-01 00:00:00.000'"); + testPredicatePushdown("timestamp '1970-01-01 00:00:00.000 UTC'"); + } + + private void testPredicatePushdown(String value) { try (TestTable table = new TestTable(getQueryRunner()::execute, "test_predicate_pushdown", "AS SELECT %s col".formatted(value))) { testPredicatePushdown(table.getName(), "col = " + value); @@ -324,27 +348,6 @@ public void testPredicatePushdown(String value) } } - @DataProvider - public Object[][] predicatePushdownProvider() - { - return new Object[][] { - {"true"}, - {"tinyint '1'"}, - {"smallint '2'"}, - {"integer '3'"}, - {"bigint '4'"}, - {"decimal '3.14'"}, - {"decimal 
'1234567890.123456789'"}, - {"'test'"}, - {"char 'test'"}, - {"objectid('6216f0c6c432d45190f25e7c')"}, - {"date '1970-01-01'"}, - {"time '00:00:00.000'"}, - {"timestamp '1970-01-01 00:00:00.000'"}, - {"timestamp '1970-01-01 00:00:00.000 UTC'"}, - }; - } - @Test public void testPredicatePushdownRealType() { @@ -608,8 +611,20 @@ public void testNegativeZeroDecimal() assertUpdate("DROP TABLE test." + tableName); } - @Test(dataProvider = "dbRefProvider") - public void testDBRef(Object objectId, String expectedValue, String expectedType) + @Test + public void testDBRef() + { + testDBRef("String type", "varchar 'String type'", "varchar"); + testDBRef("BinData".getBytes(UTF_8), "to_utf8('BinData')", "varbinary"); + testDBRef(1234567890, "bigint '1234567890'", "bigint"); + testDBRef(true, "true", "boolean"); + testDBRef(12.3f, "double '12.3'", "double"); + testDBRef(new Date(0), "timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"); + testDBRef(ImmutableList.of(1), "array[bigint '1']", "array(bigint)"); + testDBRef(new ObjectId("5126bc054aed4daf9e2ab772"), "ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"); + } + + private void testDBRef(Object objectId, String expectedValue, String expectedType) { Document document = Document.parse("{\"_id\":ObjectId(\"5126bbf64aed4daf9e2ab771\"),\"col1\":\"foo\"}"); @@ -630,21 +645,6 @@ public void testDBRef(Object objectId, String expectedValue, String expectedType assertUpdate("DROP TABLE test." 
+ tableName); } - @DataProvider - public Object[][] dbRefProvider() - { - return new Object[][] { - {"String type", "varchar 'String type'", "varchar"}, - {"BinData".getBytes(UTF_8), "to_utf8('BinData')", "varbinary"}, - {1234567890, "bigint '1234567890'", "bigint"}, - {true, "true", "boolean"}, - {12.3f, "double '12.3'", "double"}, - {new Date(0), "timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"}, - {ImmutableList.of(1), "array[bigint '1']", "array(bigint)"}, - {new ObjectId("5126bc054aed4daf9e2ab772"), "ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"}, - }; - } - @Test public void testDbRefFieldOrder() { @@ -956,11 +956,12 @@ public void testCollationNumericOrdering() assertUpdate("DROP TABLE test." + tableName); } + @Test @Override public void testAddColumnConcurrently() { // TODO: Enable after supporting multi-document transaction https://www.mongodb.com/docs/manual/core/transactions/ - throw new SkipException("TODO"); + abort("TODO"); } @Test @@ -1313,11 +1314,23 @@ public void testProjectionPushdownMixedWithUnsupportedFieldName() assertUpdate("DROP TABLE test." 
+ tableName); } - @Test(dataProvider = "nestedValuesProvider") - public void testFiltersOnDereferenceColumnReadsLessData(String expectedValue, String expectedType) + @Test + public void testFiltersOnDereferenceColumnReadsLessData() + { + testFiltersOnDereferenceColumnReadsLessData("varchar 'String type'", "varchar"); + testFiltersOnDereferenceColumnReadsLessData("to_utf8('BinData')", "varbinary"); + testFiltersOnDereferenceColumnReadsLessData("bigint '1234567890'", "bigint"); + testFiltersOnDereferenceColumnReadsLessData("true", "boolean"); + testFiltersOnDereferenceColumnReadsLessData("double '12.3'", "double"); + testFiltersOnDereferenceColumnReadsLessData("timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"); + testFiltersOnDereferenceColumnReadsLessData("array[bigint '1']", "array(bigint)"); + testFiltersOnDereferenceColumnReadsLessData("ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"); + } + + private void testFiltersOnDereferenceColumnReadsLessData(String expectedValue, String expectedType) { if (!isPushdownSupportedType(getQueryRunner().getTypeManager().fromSqlType(expectedType))) { - throw new SkipException("Type doesn't support filter pushdown"); + return; // type doesn't support filter pushdown; skip this value only } Session sessionWithoutPushdown = Session.builder(getSession()) @@ -1387,21 +1400,6 @@ public void testFiltersOnDereferenceColumnReadsLessData(String expectedValue, St } } - @DataProvider - public Object[][] nestedValuesProvider() - { - return new Object[][] { - {"varchar 'String type'", "varchar"}, - {"to_utf8('BinData')", "varbinary"}, - {"bigint '1234567890'", "bigint"}, - {"true", "boolean"}, - {"double '12.3'", "double"}, - {"timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"}, - {"array[bigint '1']", "array(bigint)"}, - {"ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"}, - }; - } - @Test public void testFiltersOnDereferenceColumnReadsLessDataNativeQuery() { @@ -1479,8 +1477,20 @@ public void testProjectionPushdownWithColumnMissingInDocument()
assertUpdate("DROP TABLE test." + tableName); } - @Test(dataProvider = "dbRefProvider") - public void testProjectionPushdownWithDBRef(Object objectId, String expectedValue, String expectedType) + @Test + public void testProjectionPushdownWithDBRef() + { + testProjectionPushdownWithDBRef("String type", "varchar 'String type'", "varchar"); + testProjectionPushdownWithDBRef("BinData".getBytes(UTF_8), "to_utf8('BinData')", "varbinary"); + testProjectionPushdownWithDBRef(1234567890, "bigint '1234567890'", "bigint"); + testProjectionPushdownWithDBRef(true, "true", "boolean"); + testProjectionPushdownWithDBRef(12.3f, "double '12.3'", "double"); + testProjectionPushdownWithDBRef(new Date(0), "timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"); + testProjectionPushdownWithDBRef(ImmutableList.of(1), "array[bigint '1']", "array(bigint)"); + testProjectionPushdownWithDBRef(new ObjectId("5126bc054aed4daf9e2ab772"), "ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"); + } + + private void testProjectionPushdownWithDBRef(Object objectId, String expectedValue, String expectedType) { String tableName = "test_projection_pushdown_with_dbref_" + randomNameSuffix(); @@ -1503,8 +1513,20 @@ public void testProjectionPushdownWithDBRef(Object objectId, String expectedValu assertUpdate("DROP TABLE test." 
+ tableName); } - @Test(dataProvider = "dbRefProvider") - public void testProjectionPushdownWithNestedDBRef(Object objectId, String expectedValue, String expectedType) + @Test + public void testProjectionPushdownWithNestedDBRef() + { + testProjectionPushdownWithNestedDBRef("String type", "varchar 'String type'", "varchar"); + testProjectionPushdownWithNestedDBRef("BinData".getBytes(UTF_8), "to_utf8('BinData')", "varbinary"); + testProjectionPushdownWithNestedDBRef(1234567890, "bigint '1234567890'", "bigint"); + testProjectionPushdownWithNestedDBRef(true, "true", "boolean"); + testProjectionPushdownWithNestedDBRef(12.3f, "double '12.3'", "double"); + testProjectionPushdownWithNestedDBRef(new Date(0), "timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"); + testProjectionPushdownWithNestedDBRef(ImmutableList.of(1), "array[bigint '1']", "array(bigint)"); + testProjectionPushdownWithNestedDBRef(new ObjectId("5126bc054aed4daf9e2ab772"), "ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"); + } + + private void testProjectionPushdownWithNestedDBRef(Object objectId, String expectedValue, String expectedType) { String tableName = "test_projection_pushdown_with_dbref_" + randomNameSuffix(); @@ -1528,8 +1550,20 @@ public void testProjectionPushdownWithNestedDBRef(Object objectId, String expect assertUpdate("DROP TABLE test." 
+ tableName); } - @Test(dataProvider = "dbRefProvider") - public void testProjectionPushdownWithPredefinedDBRefKeyword(Object objectId, String expectedValue, String expectedType) + @Test + public void testProjectionPushdownWithPredefinedDBRefKeyword() + { + testProjectionPushdownWithPredefinedDBRefKeyword("String type", "varchar 'String type'", "varchar"); + testProjectionPushdownWithPredefinedDBRefKeyword("BinData".getBytes(UTF_8), "to_utf8('BinData')", "varbinary"); + testProjectionPushdownWithPredefinedDBRefKeyword(1234567890, "bigint '1234567890'", "bigint"); + testProjectionPushdownWithPredefinedDBRefKeyword(true, "true", "boolean"); + testProjectionPushdownWithPredefinedDBRefKeyword(12.3f, "double '12.3'", "double"); + testProjectionPushdownWithPredefinedDBRefKeyword(new Date(0), "timestamp '1970-01-01 00:00:00.000'", "timestamp(3)"); + testProjectionPushdownWithPredefinedDBRefKeyword(ImmutableList.of(1), "array[bigint '1']", "array(bigint)"); + testProjectionPushdownWithPredefinedDBRefKeyword(new ObjectId("5126bc054aed4daf9e2ab772"), "ObjectId('5126bc054aed4daf9e2ab772')", "ObjectId"); + } + + private void testProjectionPushdownWithPredefinedDBRefKeyword(Object objectId, String expectedValue, String expectedType) { String tableName = "test_projection_pushdown_with_predefined_dbref_keyword_" + randomNameSuffix(); @@ -1552,8 +1586,52 @@ public void testProjectionPushdownWithPredefinedDBRefKeyword(Object objectId, St assertUpdate("DROP TABLE test." 
+ tableName); } - @Test(dataProvider = "dbRefAndDocumentProvider") - public void testDBRefLikeDocument(Document document1, Document document2, String expectedValue) + @Test + public void testDBRefLikeDocument() + { + testDBRefLikeDocument("String type", "varchar 'String type'"); + testDBRefLikeDocument("BinData".getBytes(UTF_8), "to_utf8('BinData')"); + testDBRefLikeDocument(1234567890, "bigint '1234567890'"); + testDBRefLikeDocument(true, "true"); + testDBRefLikeDocument(12.3f, "double '12.3'"); + testDBRefLikeDocument(new Date(0), "timestamp '1970-01-01 00:00:00.000'"); + testDBRefLikeDocument(ImmutableList.of(1), "array[bigint '1']"); + testDBRefLikeDocument(new ObjectId("5126bc054aed4daf9e2ab772"), "ObjectId('5126bc054aed4daf9e2ab772')"); + + testDBRefLikeDocument(dbRefDocument("String type"), documentWithSameDbRefFieldOrder("String type"), "varchar 'String type'"); + testDBRefLikeDocument(dbRefDocument("String type"), getDocumentWithDifferentDbRefFieldOrder("String type"), "varchar 'String type'"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder("String type"), dbRefDocument("String type"), "varchar 'String type'"); + + testDBRefLikeDocument(dbRefDocument("BinData".getBytes(UTF_8)), documentWithSameDbRefFieldOrder("BinData".getBytes(UTF_8)), "to_utf8('BinData')"); + testDBRefLikeDocument(dbRefDocument("BinData".getBytes(UTF_8)), getDocumentWithDifferentDbRefFieldOrder("BinData".getBytes(UTF_8)), "to_utf8('BinData')"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder("BinData".getBytes(UTF_8)), dbRefDocument("BinData".getBytes(UTF_8)), "to_utf8('BinData')"); + + testDBRefLikeDocument(dbRefDocument(1234567890), documentWithSameDbRefFieldOrder(1234567890), "bigint '1234567890'"); + testDBRefLikeDocument(dbRefDocument(1234567890), getDocumentWithDifferentDbRefFieldOrder(1234567890), "bigint '1234567890'"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder(1234567890), dbRefDocument(1234567890), "bigint '1234567890'"); + + 
testDBRefLikeDocument(dbRefDocument(true), documentWithSameDbRefFieldOrder(true), "true"); + testDBRefLikeDocument(dbRefDocument(true), getDocumentWithDifferentDbRefFieldOrder(true), "true"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder(true), dbRefDocument(true), "true"); + + testDBRefLikeDocument(dbRefDocument(12.3f), documentWithSameDbRefFieldOrder(12.3f), "double '12.3'"); + testDBRefLikeDocument(dbRefDocument(12.3f), getDocumentWithDifferentDbRefFieldOrder(12.3f), "double '12.3'"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder(12.3f), dbRefDocument(12.3f), "double '12.3'"); + + testDBRefLikeDocument(dbRefDocument(new Date(0)), documentWithSameDbRefFieldOrder(new Date(0)), "timestamp '1970-01-01 00:00:00.000'"); + testDBRefLikeDocument(dbRefDocument(new Date(0)), getDocumentWithDifferentDbRefFieldOrder(new Date(0)), "timestamp '1970-01-01 00:00:00.000'"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder(new Date(0)), dbRefDocument(new Date(0)), "timestamp '1970-01-01 00:00:00.000'"); + + testDBRefLikeDocument(dbRefDocument(ImmutableList.of(1)), documentWithSameDbRefFieldOrder(ImmutableList.of(1)), "array[bigint '1']"); + testDBRefLikeDocument(dbRefDocument(ImmutableList.of(1)), getDocumentWithDifferentDbRefFieldOrder(ImmutableList.of(1)), "array[bigint '1']"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder(ImmutableList.of(1)), dbRefDocument(ImmutableList.of(1)), "array[bigint '1']"); + + testDBRefLikeDocument(dbRefDocument(new ObjectId("5126bc054aed4daf9e2ab772")), documentWithSameDbRefFieldOrder(new ObjectId("5126bc054aed4daf9e2ab772")), "ObjectId('5126bc054aed4daf9e2ab772')"); + testDBRefLikeDocument(dbRefDocument(new ObjectId("5126bc054aed4daf9e2ab772")), getDocumentWithDifferentDbRefFieldOrder(new ObjectId("5126bc054aed4daf9e2ab772")), "ObjectId('5126bc054aed4daf9e2ab772')"); + testDBRefLikeDocument(documentWithSameDbRefFieldOrder(new ObjectId("5126bc054aed4daf9e2ab772")), dbRefDocument(new 
ObjectId("5126bc054aed4daf9e2ab772")), "ObjectId('5126bc054aed4daf9e2ab772')"); + } + + private void testDBRefLikeDocument(Document document1, Document document2, String expectedValue) { String tableName = "test_dbref_like_document_" + randomNameSuffix(); @@ -1580,34 +1658,28 @@ public void testDBRefLikeDocument(Document document1, Document document2, String assertUpdate("DROP TABLE test." + tableName); } - @DataProvider - public Object[][] dbRefAndDocumentProvider() - { - Object[][] dbRefObjects = dbRefProvider(); - Object[][] objects = new Object[dbRefObjects.length * 3][]; - int i = 0; - for (Object[] dbRefObject : dbRefObjects) { - Object objectId = dbRefObject[0]; - Object expectedValue = dbRefObject[1]; - Document dbRefDocument = new Document() - .append("_id", new ObjectId("5126bbf64aed4daf9e2ab772")) - .append("creator", new DBRef("dbref_test", "dbref_creators", objectId)); - Document documentWithSameDbRefFieldOrder = new Document() - .append("_id", new ObjectId("5126bbf64aed4daf9e2ab771")) - .append("creator", new Document().append("databaseName", "doc_test").append("collectionName", "doc_creators").append("id", objectId)); - Document documentWithDifferentDbRefFieldOrder = new Document() - .append("_id", new ObjectId("5126bbf64aed4daf9e2ab771")) - .append("creator", new Document().append("collectionName", "doc_creators").append("id", objectId).append("databaseName", "doc_test")); - - objects[i++] = new Object[] {dbRefDocument, documentWithSameDbRefFieldOrder, expectedValue}; - objects[i++] = new Object[] {dbRefDocument, documentWithDifferentDbRefFieldOrder, expectedValue}; - objects[i++] = new Object[] {documentWithSameDbRefFieldOrder, dbRefDocument, expectedValue}; - } - return objects; + private static Document getDocumentWithDifferentDbRefFieldOrder(Object objectId) + { + return new Document() + .append("_id", new ObjectId("5126bbf64aed4daf9e2ab771")) + .append("creator", new Document().append("collectionName", "doc_creators").append("id", 
objectId).append("databaseName", "doc_test")); + } + + private static Document documentWithSameDbRefFieldOrder(Object objectId) + { + return new Document() + .append("_id", new ObjectId("5126bbf64aed4daf9e2ab771")) + .append("creator", new Document().append("databaseName", "doc_test").append("collectionName", "doc_creators").append("id", objectId)); + } + + private static Document dbRefDocument(Object objectId) + { + return new Document() + .append("_id", new ObjectId("5126bbf64aed4daf9e2ab772")) + .append("creator", new DBRef("dbref_test", "dbref_creators", objectId)); } - @Test(dataProvider = "dbRefProvider") - public void testDBRefLikeDocument(Object objectId, String expectedValue, String expectedType) + private void testDBRefLikeDocument(Object objectId, String expectedValue) { String tableName = "test_dbref_like_document_fails_" + randomNameSuffix(); @@ -1640,8 +1712,17 @@ public void testDBRefLikeDocument(Object objectId, String expectedValue, String assertUpdate("DROP TABLE test." + tableName); } - @Test(dataProvider = "dfRefPredicateProvider") - public void testPredicateOnDBRefField(Object objectId, String expectedValue) + @Test + public void testPredicateOnDBRefField() + { + testPredicateOnDBRefField(true, "true"); + testPredicateOnDBRefField(4, "bigint '4'"); + testPredicateOnDBRefField("test", "'test'"); + testPredicateOnDBRefField(new ObjectId("6216f0c6c432d45190f25e7c"), "ObjectId('6216f0c6c432d45190f25e7c')"); + testPredicateOnDBRefField(new Date(0), "timestamp '1970-01-01 00:00:00.000'"); + } + + private void testPredicateOnDBRefField(Object objectId, String expectedValue) { String tableName = "test_predicate_on_dbref_field_" + randomNameSuffix(); @@ -1664,8 +1745,17 @@ public void testPredicateOnDBRefField(Object objectId, String expectedValue) assertUpdate("DROP TABLE test." 
+ tableName); } - @Test(dataProvider = "dfRefPredicateProvider") - public void testPredicateOnDBRefLikeDocument(Object objectId, String expectedValue) + @Test + public void testPredicateOnDBRefLikeDocument() + { + testPredicateOnDBRefLikeDocument(true, "true"); + testPredicateOnDBRefLikeDocument(4, "bigint '4'"); + testPredicateOnDBRefLikeDocument("test", "'test'"); + testPredicateOnDBRefLikeDocument(new ObjectId("6216f0c6c432d45190f25e7c"), "ObjectId('6216f0c6c432d45190f25e7c')"); + testPredicateOnDBRefLikeDocument(new Date(0), "timestamp '1970-01-01 00:00:00.000'"); + } + + private void testPredicateOnDBRefLikeDocument(Object objectId, String expectedValue) { String tableName = "test_predicate_on_dbref_like_document_" + randomNameSuffix(); @@ -1691,32 +1781,20 @@ public void testPredicateOnDBRefLikeDocument(Object objectId, String expectedVal assertUpdate("DROP TABLE test." + tableName); } - @DataProvider - public Object[][] dfRefPredicateProvider() - { - return new Object[][] { - {true, "true"}, - {4, "bigint '4'"}, - {"test", "'test'"}, - {new ObjectId("6216f0c6c432d45190f25e7c"), "ObjectId('6216f0c6c432d45190f25e7c')"}, - {new Date(0), "timestamp '1970-01-01 00:00:00.000'"}, - }; - } - - @Override @Test + @Override public void testProjectionPushdownReadsLessData() { // TODO https://github.com/trinodb/trino/issues/17713 - throw new SkipException("MongoDB connector does not calculate physical data input size"); + abort("MongoDB connector does not calculate physical data input size"); } - @Override @Test + @Override public void testProjectionPushdownPhysicalInputSize() { // TODO https://github.com/trinodb/trino/issues/17713 - throw new SkipException("MongoDB connector does not calculate physical data input size"); + abort("MongoDB connector does not calculate physical data input size"); } @Override diff --git a/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/BaseMySqlConnectorTest.java 
b/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/BaseMySqlConnectorTest.java index 679dd31e4c40..cc0322f29c5a 100644 --- a/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/BaseMySqlConnectorTest.java +++ b/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/BaseMySqlConnectorTest.java @@ -23,8 +23,7 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.sql.Connection; import java.sql.PreparedStatement; @@ -103,7 +102,7 @@ protected TestTable createTableWithUnsupportedColumn() "(one bigint, two decimal(50,0), three varchar(10))"); } - @org.junit.jupiter.api.Test + @Test @Override public void testShowColumns() { @@ -165,6 +164,7 @@ protected MaterializedResult getDescribeOrdersResult() .build(); } + @Test @Override public void testShowCreateTable() { @@ -182,6 +182,7 @@ public void testShowCreateTable() ")"); } + @Test @Override public void testDeleteWithLike() { @@ -262,6 +263,7 @@ public void testColumnComment() assertUpdate("DROP TABLE test_column_comment"); } + @Test @Override public void testAddNotNullColumn() { @@ -379,16 +381,28 @@ public void testPredicatePushdown() .isFullyPushedDown(); } - @Test(dataProvider = "charsetAndCollation") - public void testPredicatePushdownWithCollationView(String charset, String collation) + @Test + public void testPredicatePushdownWithCollationView() + { + testPredicatePushdownWithCollationView("latin1", "latin1_general_cs"); + testPredicatePushdownWithCollationView("utf8", "utf8_bin"); + } + + private void testPredicatePushdownWithCollationView(String charset, String collation) { onRemoteDatabase().execute(format("CREATE OR REPLACE VIEW tpch.test_view_pushdown AS SELECT regionkey, nationkey, CONVERT(name USING %s) COLLATE %s AS name FROM tpch.nation;", charset, collation)); 
testNationCollationQueries("test_view_pushdown"); onRemoteDatabase().execute("DROP VIEW tpch.test_view_pushdown"); } - @Test(dataProvider = "charsetAndCollation") - public void testPredicatePushdownWithCollation(String charset, String collation) + @Test + public void testPredicatePushdownWithCollation() + { + testPredicatePushdownWithCollation("latin1", "latin1_general_cs"); + testPredicatePushdownWithCollation("utf8", "utf8_bin"); + } + + private void testPredicatePushdownWithCollation(String charset, String collation) { try (TestTable testTable = new TestTable( onRemoteDatabase(), @@ -474,12 +488,6 @@ private void testNationCollationQueries(String objectName) .joinIsNotFullyPushedDown(); } - @DataProvider - public static Object[][] charsetAndCollation() - { - return new Object[][] {{"latin1", "latin1_general_cs"}, {"utf8", "utf8_bin"}}; - } - /** * This test helps to tune TupleDomain simplification threshold. */ @@ -504,6 +512,7 @@ public void testNativeMultipleInClauses() onRemoteDatabase().execute("SELECT count(*) FROM tpch.orders WHERE " + longInClauses); } + @Test @Override public void testNativeQueryInsertStatementTableDoesNotExist() { @@ -513,6 +522,7 @@ public void testNativeQueryInsertStatementTableDoesNotExist() .hasMessageContaining("Query not supported: ResultSetMetaData not available for query: INSERT INTO non_existent_table VALUES (1)"); } + @Test @Override public void testNativeQueryIncorrectSyntax() { diff --git a/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/TestMySqlLegacyConnectorTest.java b/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/TestMySqlLegacyConnectorTest.java index eff06d44caef..f8cc52e93a36 100644 --- a/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/TestMySqlLegacyConnectorTest.java +++ b/plugin/trino-mysql/src/test/java/io/trino/plugin/mysql/TestMySqlLegacyConnectorTest.java @@ -134,14 +134,18 @@ public void testRenameColumn() .hasStackTraceContaining("RENAME COLUMN x TO before_y"); } + @Test @Override - 
public void testRenameColumnName(String columnName) + public void testRenameColumnName() { - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - .hasMessageContaining("You have an error in your SQL syntax") - .hasStackTraceContaining("RENAME COLUMN"); + for (String columnName : testColumnNameDataProvider()) { + assertThatThrownBy(() -> testRenameColumnName(columnName, requiresDelimiting(columnName))) + .hasMessageContaining("You have an error in your SQL syntax") + .hasStackTraceContaining("RENAME COLUMN"); + } } + @Test @Override public void testAlterTableRenameColumnToLongName() { @@ -167,6 +171,7 @@ protected void verifyColumnNameLengthFailurePermissible(Throwable e) assertThat(e).hasMessageMatching("Identifier name .* is too long"); } + @Test @Override public void testNativeQueryWithClause() { diff --git a/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/BaseOracleConnectorTest.java b/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/BaseOracleConnectorTest.java index 0d9834820a8d..644959e2d8e1 100644 --- a/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/BaseOracleConnectorTest.java +++ b/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/BaseOracleConnectorTest.java @@ -23,7 +23,7 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.TestTable; import io.trino.testing.sql.TestView; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.List; import java.util.Optional; @@ -115,14 +115,14 @@ protected TestTable createTableWithUnsupportedColumn() "(one NUMBER(19), two NUMBER, three VARCHAR2(10 CHAR))"); } - @org.junit.jupiter.api.Test + @Test @Override public void testShowColumns() { assertThat(query("SHOW COLUMNS FROM orders")).matches(getDescribeOrdersResult()); } - @org.junit.jupiter.api.Test + @Test @Override public void testInformationSchemaFiltering() { @@ -191,6 +191,7 @@ public void testTimestampOutOfPrecisionRounded() assertUpdate("DROP TABLE " + 
tableName); } + @Test @Override public void testCharVarcharComparison() { @@ -214,6 +215,7 @@ public void testCharVarcharComparison() } } + @Test @Override public void testVarcharCharComparison() { @@ -244,6 +246,7 @@ public void testVarcharCharComparison() } } + @Test @Override public void testAggregationWithUnsupportedResultType() { @@ -266,6 +269,7 @@ protected TestTable createAggregationTestTable(String name, List rows) return new TestTable(onRemoteDatabase(), name, "(short_decimal number(9, 3), long_decimal number(30, 10), a_bigint number(19), t_double binary_double)", rows); } + @Test @Override public void testDeleteWithLike() { @@ -372,6 +376,7 @@ public void testTooLargeDomainCompactionThreshold() "SELECT * from nation", "Domain compaction threshold \\(10000\\) cannot exceed 1000"); } + @Test @Override public void testNativeQuerySimple() { @@ -379,6 +384,7 @@ public void testNativeQuerySimple() assertQuery("SELECT * FROM TABLE(system.query(query => 'SELECT CAST(1 AS number(2, 1)) FROM DUAL'))", ("VALUES 1")); } + @Test @Override public void testNativeQueryParameters() { @@ -391,6 +397,7 @@ public void testNativeQueryParameters() assertQuery(session, "EXECUTE my_query USING 'a', '(SELECT CAST(2 AS number(2, 1)) a FROM DUAL) t'", "VALUES 2"); } + @Test @Override public void testNativeQueryInsertStatementTableDoesNotExist() { @@ -400,6 +407,7 @@ public void testNativeQueryInsertStatementTableDoesNotExist() .hasMessageContaining("Query not supported: ResultSetMetaData not available for query: INSERT INTO non_existent_table VALUES (1)"); } + @Test @Override public void testNativeQueryIncorrectSyntax() { diff --git a/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/TestOracleConnectorTest.java b/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/TestOracleConnectorTest.java index af499f537977..a372c5171e48 100644 --- a/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/TestOracleConnectorTest.java +++ 
b/plugin/trino-oracle/src/test/java/io/trino/plugin/oracle/TestOracleConnectorTest.java @@ -18,8 +18,9 @@ import io.trino.testing.QueryRunner; import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import static io.trino.plugin.jdbc.DefaultJdbcMetadata.DEFAULT_COLUMN_ALIAS_LENGTH; import static io.trino.plugin.oracle.TestingOracleServer.TEST_PASS; @@ -29,7 +30,9 @@ import static java.util.stream.Collectors.joining; import static java.util.stream.IntStream.range; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; +@TestInstance(PER_CLASS) public class TestOracleConnectorTest extends BaseOracleConnectorTest { @@ -55,7 +58,7 @@ protected QueryRunner createQueryRunner() REQUIRED_TPCH_TABLES); } - @AfterClass(alwaysRun = true) + @AfterAll public final void destroy() throws Exception { diff --git a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java index 2c5e1dcab91c..02a206a5297e 100644 --- a/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java +++ b/plugin/trino-phoenix5/src/test/java/io/trino/plugin/phoenix5/TestPhoenixConnectorTest.java @@ -25,8 +25,8 @@ import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.sql.Connection; import java.sql.DriverManager; @@ -64,6 +64,7 @@ import static java.util.Locale.ENGLISH; import static org.assertj.core.api.Assertions.assertThat; import 
static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertTrue; public class TestPhoenixConnectorTest @@ -108,6 +109,7 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) } // TODO: wait https://github.com/trinodb/trino/pull/14939 done and then remove this test + @Test @Override public void testArithmeticPredicatePushdown() { @@ -204,65 +206,65 @@ private void verifyDivisionByZeroFailure(Throwable e) @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Phoenix connector does not support column default values"); + return abort("Phoenix connector does not support column default values"); } @Override protected TestTable createTableWithUnsupportedColumn() { // Apparently all Phoenix types are supported in the Phoenix connector. - throw new SkipException("Cannot find an unsupported data type"); + return abort("Cannot find an unsupported data type"); } + @Test @Override public void testRenameColumn() { assertThatThrownBy(super::testRenameColumn) // TODO (https://github.com/trinodb/trino/issues/7205) support column rename in Phoenix .hasMessageContaining("Syntax error. Encountered \"RENAME\""); - throw new SkipException("Rename column is not yet supported by Phoenix connector"); + abort("Rename column is not yet supported by Phoenix connector"); } + @Test @Override public void testAlterTableRenameColumnToLongName() { assertThatThrownBy(super::testAlterTableRenameColumnToLongName) // TODO (https://github.com/trinodb/trino/issues/7205) support column rename in Phoenix .hasMessageContaining("Syntax error. 
Encountered \"RENAME\""); - throw new SkipException("Rename column is not yet supported by Phoenix connector"); + abort("Rename column is not yet supported by Phoenix connector"); } + @Test + @Disabled @Override - public void testRenameColumnName(String columnName) + public void testRenameColumnName() { - // The column name is rejected when creating a table - if (columnName.equals("a\"quote")) { - super.testRenameColumnName(columnName); - return; - } - assertThatThrownBy(() -> super.testRenameColumnName(columnName)) - // TODO (https://github.com/trinodb/trino/issues/7205) support column rename in Phoenix - .hasMessageContaining("Syntax error. Encountered \"RENAME\""); - throw new SkipException("Rename column is not yet supported by Phoenix connector"); } + @Test @Override - public void testAddAndDropColumnName(String columnName) - { - // TODO: Investigate why these two case fail - if (columnName.equals("an'apostrophe")) { - assertThatThrownBy(() -> super.testAddAndDropColumnName(columnName)) - .hasMessageContaining("Syntax error. Mismatched input"); - throw new SkipException("TODO"); - } - if (columnName.equals("a\\backslash`")) { - assertThatThrownBy(() -> super.testAddAndDropColumnName(columnName)) - .hasMessageContaining("Undefined column"); - throw new SkipException("TODO"); + public void testAddAndDropColumnName() + { + for (String columnName : testColumnNameDataProvider()) { + // TODO: Investigate why these two case fail + if (columnName.equals("an'apostrophe")) { + assertThatThrownBy(() -> testAddAndDropColumnName(columnName, requiresDelimiting(columnName))) + .hasMessageContaining("Syntax error. 
Mismatched input"); + abort("TODO"); + } + if (columnName.equals("a\\backslash`")) { + assertThatThrownBy(() -> testAddAndDropColumnName(columnName, requiresDelimiting(columnName))) + .hasMessageContaining("Undefined column"); + abort("TODO"); + } + + testAddAndDropColumnName(columnName, requiresDelimiting(columnName)); } - super.testAddAndDropColumnName(columnName); } + @Test @Override public void testInsertArray() { @@ -271,10 +273,11 @@ public void testInsertArray() .hasMessage("Phoenix JDBC driver replaced 'null' with '0.0' at index 1 in [0.0]"); } + @Test @Override public void testCreateSchema() { - throw new SkipException("test disabled until issue fixed"); // TODO https://github.com/trinodb/trino/issues/2348 + abort("test disabled until issue fixed"); // TODO https://github.com/trinodb/trino/issues/2348 } @Override @@ -309,6 +312,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } + @Test @Override public void testShowCreateTable() { @@ -332,6 +336,7 @@ public void testShowCreateTable() ")"); } + @Test @Override public void testCharVarcharComparison() { @@ -355,6 +360,7 @@ public void testCharVarcharComparison() } } + @Test @Override public void testVarcharCharComparison() { @@ -385,6 +391,7 @@ public void testVarcharCharComparison() } } + @Test @Override public void testCharTrailingSpace() { @@ -399,6 +406,7 @@ public void testCharTrailingSpace() } // Overridden because Phoenix requires a ROWID column + @Test @Override public void testCountDistinctWithStringTypes() { @@ -415,6 +423,7 @@ public void testCountDistinctWithStringTypes() } } + @Test @Override public void testMergeLarge() { @@ -523,10 +532,11 @@ private void testMergeWithSpecifiedRowkeys(String rowkeyDefinition) assertUpdate("DROP TABLE " + targetTable); } + @Test @Override public void testUpdateRowConcurrently() { - throw new SkipException("Phoenix doesn't support concurrent update of different columns in a row"); + abort("Phoenix doesn't 
support concurrent update of different columns in a row"); } @Test @@ -648,10 +658,11 @@ public void testMissingColumnsOnInsert() assertQuery("SELECT * FROM test_col_insert", "SELECT 1, 'val1', 'val2'"); } + @Test @Override public void testTopNPushdown() { - throw new SkipException("Phoenix does not support topN push down, but instead replaces partial topN with partial Limit."); + abort("Phoenix does not support topN push down, but instead replaces partial topN with partial Limit."); } @Test @@ -780,34 +791,38 @@ protected void verifyConcurrentAddColumnFailurePermissible(Exception e) .hasMessageContaining("Concurrent modification to table"); } + @Test @Override public void testCreateSchemaWithLongName() { // TODO: Find the maximum table schema length in Phoenix and enable this test. - throw new SkipException("TODO"); + abort("TODO"); } + @Test @Override public void testCreateTableWithLongTableName() { // TODO: Find the maximum table name length in Phoenix and enable this test. // Table name length with 65536 chars throws "startRow's length must be less than or equal to 32767 to meet the criteria for a row key." // 32767 chars still causes the same error and shorter names (e.g. 10000) causes timeout. - throw new SkipException("TODO"); + abort("TODO"); } + @Test @Override public void testCreateTableWithLongColumnName() { // TODO: Find the maximum column name length in Phoenix and enable this test. - throw new SkipException("TODO"); + abort("TODO"); } + @Test @Override public void testAlterTableAddLongColumnName() { // TODO: Find the maximum column name length in Phoenix and enable this test. 
- throw new SkipException("TODO"); + abort("TODO"); } @Test diff --git a/plugin/trino-postgresql/src/test/java/io/trino/plugin/postgresql/TestPostgreSqlConnectorTest.java b/plugin/trino-postgresql/src/test/java/io/trino/plugin/postgresql/TestPostgreSqlConnectorTest.java index 083ab976e1f5..0e1ebe8faec1 100644 --- a/plugin/trino-postgresql/src/test/java/io/trino/plugin/postgresql/TestPostgreSqlConnectorTest.java +++ b/plugin/trino-postgresql/src/test/java/io/trino/plugin/postgresql/TestPostgreSqlConnectorTest.java @@ -37,9 +37,9 @@ import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; import io.trino.testing.sql.TestView; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.sql.Connection; import java.sql.DriverManager; @@ -74,9 +74,11 @@ import static java.util.stream.Collectors.joining; import static java.util.stream.IntStream.range; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; +@TestInstance(PER_CLASS) public class TestPostgreSqlConnectorTest extends BaseJdbcConnectorTest { @@ -90,7 +92,7 @@ protected QueryRunner createQueryRunner() return createPostgreSqlQueryRunner(postgreSqlServer, Map.of(), Map.of(), REQUIRED_TPCH_TABLES); } - @BeforeClass + @BeforeAll public void setExtensions() { onRemoteDatabase().execute("CREATE EXTENSION IF NOT EXISTS file_fdw"); @@ -144,8 +146,25 @@ protected TestTable createTableWithUnsupportedColumn() "(one bigint, two decimal(50,0), three varchar(10))"); } - @Test(dataProvider = "testTimestampPrecisionOnCreateTable") - public void testTimestampPrecisionOnCreateTable(String inputType, String expectedType) + @Test + public void 
testTimestampPrecisionOnCreateTable() + { + testTimestampPrecisionOnCreateTable("timestamp(0)", "timestamp(0)"); + testTimestampPrecisionOnCreateTable("timestamp(1)", "timestamp(1)"); + testTimestampPrecisionOnCreateTable("timestamp(2)", "timestamp(2)"); + testTimestampPrecisionOnCreateTable("timestamp(3)", "timestamp(3)"); + testTimestampPrecisionOnCreateTable("timestamp(4)", "timestamp(4)"); + testTimestampPrecisionOnCreateTable("timestamp(5)", "timestamp(5)"); + testTimestampPrecisionOnCreateTable("timestamp(6)", "timestamp(6)"); + testTimestampPrecisionOnCreateTable("timestamp(7)", "timestamp(6)"); + testTimestampPrecisionOnCreateTable("timestamp(8)", "timestamp(6)"); + testTimestampPrecisionOnCreateTable("timestamp(9)", "timestamp(6)"); + testTimestampPrecisionOnCreateTable("timestamp(10)", "timestamp(6)"); + testTimestampPrecisionOnCreateTable("timestamp(11)", "timestamp(6)"); + testTimestampPrecisionOnCreateTable("timestamp(12)", "timestamp(6)"); + } + + private void testTimestampPrecisionOnCreateTable(String inputType, String expectedType) { try (TestTable testTable = new TestTable( getQueryRunner()::execute, @@ -155,28 +174,37 @@ public void testTimestampPrecisionOnCreateTable(String inputType, String expecte } } - @DataProvider(name = "testTimestampPrecisionOnCreateTable") - public static Object[][] timestampPrecisionOnCreateTableProvider() + @Test + public void testTimestampPrecisionOnCreateTableAsSelect() { - return new Object[][]{ - {"timestamp(0)", "timestamp(0)"}, - {"timestamp(1)", "timestamp(1)"}, - {"timestamp(2)", "timestamp(2)"}, - {"timestamp(3)", "timestamp(3)"}, - {"timestamp(4)", "timestamp(4)"}, - {"timestamp(5)", "timestamp(5)"}, - {"timestamp(6)", "timestamp(6)"}, - {"timestamp(7)", "timestamp(6)"}, - {"timestamp(8)", "timestamp(6)"}, - {"timestamp(9)", "timestamp(6)"}, - {"timestamp(10)", "timestamp(6)"}, - {"timestamp(11)", "timestamp(6)"}, - {"timestamp(12)", "timestamp(6)"} - }; + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP 
'1970-01-01 00:00:00'", "timestamp(0)", "TIMESTAMP '1970-01-01 00:00:00'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.9'", "timestamp(1)", "TIMESTAMP '1970-01-01 00:00:00.9'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.56'", "timestamp(2)", "TIMESTAMP '1970-01-01 00:00:00.56'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.123'", "timestamp(3)", "TIMESTAMP '1970-01-01 00:00:00.123'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.4896'", "timestamp(4)", "TIMESTAMP '1970-01-01 00:00:00.4896'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.89356'", "timestamp(5)", "TIMESTAMP '1970-01-01 00:00:00.89356'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.123000'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123000'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.999'", "timestamp(3)", "TIMESTAMP '1970-01-01 00:00:00.999'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.123456'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '2020-09-27 12:34:56.1'", "timestamp(1)", "TIMESTAMP '2020-09-27 12:34:56.1'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '2020-09-27 12:34:56.9'", "timestamp(1)", "TIMESTAMP '2020-09-27 12:34:56.9'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '2020-09-27 12:34:56.123'", "timestamp(3)", "TIMESTAMP '2020-09-27 12:34:56.123'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '2020-09-27 12:34:56.123000'", "timestamp(6)", "TIMESTAMP '2020-09-27 12:34:56.123000'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '2020-09-27 12:34:56.999'", "timestamp(3)", "TIMESTAMP '2020-09-27 12:34:56.999'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '2020-09-27 
12:34:56.123456'", "timestamp(6)", "TIMESTAMP '2020-09-27 12:34:56.123456'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.1234561'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.123456499'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.123456499999'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.1234565'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123457'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.111222333444'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.111222'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 00:00:00.9999995'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:01.000000'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1970-01-01 23:59:59.9999995'", "timestamp(6)", "TIMESTAMP '1970-01-02 00:00:00.000000'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1969-12-31 23:59:59.9999995'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.000000'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1969-12-31 23:59:59.999999499999'", "timestamp(6)", "TIMESTAMP '1969-12-31 23:59:59.999999'"); + testTimestampPrecisionOnCreateTableAsSelect("TIMESTAMP '1969-12-31 23:59:59.9999994'", "timestamp(6)", "TIMESTAMP '1969-12-31 23:59:59.999999'"); } - @Test(dataProvider = "testTimestampPrecisionOnCreateTableAsSelect") - public void testTimestampPrecisionOnCreateTableAsSelect(String inputType, String tableType, String tableValue) + private void testTimestampPrecisionOnCreateTableAsSelect(String inputType, String tableType, String tableValue) { try (TestTable testTable = new TestTable( getQueryRunner()::execute, @@ -189,8 +217,37 @@ public void 
testTimestampPrecisionOnCreateTableAsSelect(String inputType, String } } - @Test(dataProvider = "testTimestampPrecisionOnCreateTableAsSelect") - public void testTimestampPrecisionOnCreateTableAsSelectWithNoData(String inputType, String tableType, String ignored) + @Test + public void testTimestampPrecisionOnCreateTableAsSelectWithNoData() + { + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00'", "timestamp(0)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.9'", "timestamp(1)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.56'", "timestamp(2)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.123'", "timestamp(3)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.4896'", "timestamp(4)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.89356'", "timestamp(5)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.123000'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.999'", "timestamp(3)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.123456'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '2020-09-27 12:34:56.1'", "timestamp(1)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '2020-09-27 12:34:56.9'", "timestamp(1)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '2020-09-27 12:34:56.123'", "timestamp(3)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '2020-09-27 12:34:56.123000'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '2020-09-27 12:34:56.999'", "timestamp(3)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '2020-09-27 
12:34:56.123456'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.1234561'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.123456499'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.123456499999'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.1234565'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.111222333444'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 00:00:00.9999995'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1970-01-01 23:59:59.9999995'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1969-12-31 23:59:59.9999995'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1969-12-31 23:59:59.999999499999'", "timestamp(6)"); + testTimestampPrecisionOnCreateTableAsSelectWithNoData("TIMESTAMP '1969-12-31 23:59:59.9999994'", "timestamp(6)"); + } + + private void testTimestampPrecisionOnCreateTableAsSelectWithNoData(String inputType, String tableType) { try (TestTable testTable = new TestTable( getQueryRunner()::execute, @@ -200,37 +257,6 @@ public void testTimestampPrecisionOnCreateTableAsSelectWithNoData(String inputTy } } - @DataProvider(name = "testTimestampPrecisionOnCreateTableAsSelect") - public static Object[][] timestampPrecisionOnCreateTableAsSelectProvider() - { - return new Object[][] { - {"TIMESTAMP '1970-01-01 00:00:00'", "timestamp(0)", "TIMESTAMP '1970-01-01 00:00:00'"}, - {"TIMESTAMP '1970-01-01 00:00:00.9'", "timestamp(1)", "TIMESTAMP '1970-01-01 00:00:00.9'"}, - {"TIMESTAMP '1970-01-01 00:00:00.56'", "timestamp(2)", "TIMESTAMP '1970-01-01 00:00:00.56'"}, - {"TIMESTAMP '1970-01-01 00:00:00.123'", 
"timestamp(3)", "TIMESTAMP '1970-01-01 00:00:00.123'"}, - {"TIMESTAMP '1970-01-01 00:00:00.4896'", "timestamp(4)", "TIMESTAMP '1970-01-01 00:00:00.4896'"}, - {"TIMESTAMP '1970-01-01 00:00:00.89356'", "timestamp(5)", "TIMESTAMP '1970-01-01 00:00:00.89356'"}, - {"TIMESTAMP '1970-01-01 00:00:00.123000'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123000'"}, - {"TIMESTAMP '1970-01-01 00:00:00.999'", "timestamp(3)", "TIMESTAMP '1970-01-01 00:00:00.999'"}, - {"TIMESTAMP '1970-01-01 00:00:00.123456'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"}, - {"TIMESTAMP '2020-09-27 12:34:56.1'", "timestamp(1)", "TIMESTAMP '2020-09-27 12:34:56.1'"}, - {"TIMESTAMP '2020-09-27 12:34:56.9'", "timestamp(1)", "TIMESTAMP '2020-09-27 12:34:56.9'"}, - {"TIMESTAMP '2020-09-27 12:34:56.123'", "timestamp(3)", "TIMESTAMP '2020-09-27 12:34:56.123'"}, - {"TIMESTAMP '2020-09-27 12:34:56.123000'", "timestamp(6)", "TIMESTAMP '2020-09-27 12:34:56.123000'"}, - {"TIMESTAMP '2020-09-27 12:34:56.999'", "timestamp(3)", "TIMESTAMP '2020-09-27 12:34:56.999'"}, - {"TIMESTAMP '2020-09-27 12:34:56.123456'", "timestamp(6)", "TIMESTAMP '2020-09-27 12:34:56.123456'"}, - {"TIMESTAMP '1970-01-01 00:00:00.1234561'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"}, - {"TIMESTAMP '1970-01-01 00:00:00.123456499'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"}, - {"TIMESTAMP '1970-01-01 00:00:00.123456499999'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123456'"}, - {"TIMESTAMP '1970-01-01 00:00:00.1234565'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.123457'"}, - {"TIMESTAMP '1970-01-01 00:00:00.111222333444'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:00.111222'"}, - {"TIMESTAMP '1970-01-01 00:00:00.9999995'", "timestamp(6)", "TIMESTAMP '1970-01-01 00:00:01.000000'"}, - {"TIMESTAMP '1970-01-01 23:59:59.9999995'", "timestamp(6)", "TIMESTAMP '1970-01-02 00:00:00.000000'"}, - {"TIMESTAMP '1969-12-31 23:59:59.9999995'", "timestamp(6)", "TIMESTAMP '1970-01-01 
00:00:00.000000'"}, - {"TIMESTAMP '1969-12-31 23:59:59.999999499999'", "timestamp(6)", "TIMESTAMP '1969-12-31 23:59:59.999999'"}, - {"TIMESTAMP '1969-12-31 23:59:59.9999994'", "timestamp(6)", "TIMESTAMP '1969-12-31 23:59:59.999999'"}}; - } - @Override protected void verifyAddNotNullColumnToNonEmptyTableFailurePermissible(Throwable e) { diff --git a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/BaseRaptorConnectorTest.java b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/BaseRaptorConnectorTest.java index c8fdfa351a5a..fab62710dcc6 100644 --- a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/BaseRaptorConnectorTest.java +++ b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/BaseRaptorConnectorTest.java @@ -25,8 +25,7 @@ import io.trino.testing.sql.TestTable; import io.trino.testng.services.Flaky; import org.intellij.lang.annotations.Language; -import org.testng.SkipException; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.time.LocalDate; import java.time.LocalDateTime; @@ -59,6 +58,7 @@ import static java.util.stream.Collectors.toSet; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotEquals; @@ -90,7 +90,7 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) @Override protected TestTable createTableWithDefaultColumns() { - throw new SkipException("Raptor connector does not support column default values"); + return abort("Raptor connector does not support column default values"); } @Override @@ -256,11 +256,15 @@ public void testBucketNumberHiddenColumn() assertEquals(actual, IntStream.range(0, 50).boxed().collect(toSet())); } - @Test(expectedExceptions = 
RuntimeException.class, expectedExceptionsMessageRegExp = ".*Column '\\$bucket_number' cannot be resolved") + @Test public void testNoBucketNumberHiddenColumn() { - assertUpdate("CREATE TABLE test_no_bucket_number (test bigint)"); - computeActual("SELECT DISTINCT \"$bucket_number\" FROM test_no_bucket_number"); + assertThatThrownBy(() -> { + assertUpdate("CREATE TABLE test_no_bucket_number (test bigint)"); + computeActual("SELECT DISTINCT \"$bucket_number\" FROM test_no_bucket_number"); + }) + .isInstanceOf(RuntimeException.class) + .hasMessageMatching(".*Column '\\$bucket_number' cannot be resolved"); } @Test diff --git a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorBucketedConnectorTest.java b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorBucketedConnectorTest.java index 960bddfc3284..4ac6ac60687a 100644 --- a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorBucketedConnectorTest.java +++ b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorBucketedConnectorTest.java @@ -15,7 +15,7 @@ import com.google.common.collect.ImmutableMap; import io.trino.testing.QueryRunner; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import static io.trino.plugin.raptor.legacy.RaptorQueryRunner.createRaptorQueryRunner; import static org.assertj.core.api.Assertions.assertThat; diff --git a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorMySqlConnectorTest.java b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorMySqlConnectorTest.java index 6e357c989ae7..b67827072044 100644 --- a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorMySqlConnectorTest.java +++ b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorMySqlConnectorTest.java @@ -17,8 +17,9 @@ import io.trino.plugin.tpch.TpchPlugin; import 
io.trino.testing.DistributedQueryRunner; import io.trino.testing.QueryRunner; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.TestInstance; import org.testcontainers.containers.MySQLContainer; -import org.testng.annotations.AfterClass; import java.io.File; import java.util.Map; @@ -26,7 +27,9 @@ import static io.trino.plugin.raptor.legacy.RaptorQueryRunner.copyTables; import static io.trino.plugin.raptor.legacy.RaptorQueryRunner.createSession; import static java.lang.String.format; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; +@TestInstance(PER_CLASS) public class TestRaptorMySqlConnectorTest extends BaseRaptorConnectorTest { @@ -41,7 +44,7 @@ protected QueryRunner createQueryRunner() return createRaptorMySqlQueryRunner(getJdbcUrl(mysqlContainer)); } - @AfterClass(alwaysRun = true) + @AfterAll public final void destroy() { mysqlContainer.close(); diff --git a/plugin/trino-redshift/src/test/java/io/trino/plugin/redshift/TestRedshiftConnectorTest.java b/plugin/trino-redshift/src/test/java/io/trino/plugin/redshift/TestRedshiftConnectorTest.java index 0b458b229d15..3c51094ab3eb 100644 --- a/plugin/trino-redshift/src/test/java/io/trino/plugin/redshift/TestRedshiftConnectorTest.java +++ b/plugin/trino-redshift/src/test/java/io/trino/plugin/redshift/TestRedshiftConnectorTest.java @@ -22,9 +22,7 @@ import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; import io.trino.tpch.TpchTable; -import org.testng.SkipException; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.List; import java.util.Optional; @@ -40,13 +38,12 @@ import static io.trino.plugin.redshift.RedshiftQueryRunner.createRedshiftQueryRunner; import static io.trino.plugin.redshift.RedshiftQueryRunner.executeInRedshift; import static io.trino.plugin.redshift.RedshiftQueryRunner.executeWithRedshift; -import static 
io.trino.testing.DataProviders.cartesianProduct; -import static io.trino.testing.DataProviders.trueFalse; import static io.trino.testing.TestingNames.randomNameSuffix; import static java.lang.String.format; import static java.util.Locale.ENGLISH; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; public class TestRedshiftConnectorTest extends BaseJdbcConnectorTest @@ -149,6 +146,7 @@ protected Optional filterDataMappingSmokeTestData(DataMapp /** * Overridden due to Redshift not supporting non-ASCII characters in CHAR. */ + @Test @Override public void testCreateTableAsSelectWithUnicode() { @@ -160,8 +158,26 @@ public void testCreateTableAsSelectWithUnicode() "SELECT 1"); } - @Test(dataProvider = "redshiftTypeToTrinoTypes") - public void testReadFromLateBindingView(String redshiftType, String trinoType) + @Test + public void testReadFromLateBindingView() + { + testReadFromLateBindingView("SMALLINT", "smallint"); + testReadFromLateBindingView("INTEGER", "integer"); + testReadFromLateBindingView("BIGINT", "bigint"); + testReadFromLateBindingView("DECIMAL", "decimal(18,0)"); + testReadFromLateBindingView("REAL", "real"); + testReadFromLateBindingView("DOUBLE PRECISION", "double"); + testReadFromLateBindingView("BOOLEAN", "boolean"); + testReadFromLateBindingView("CHAR(1)", "char(1)"); + testReadFromLateBindingView("VARCHAR(1)", "varchar(1)"); + // consider to extract "CHARACTER VARYING" type from here as it requires exact length, 0 - is for the empty string + testReadFromLateBindingView("CHARACTER VARYING", "varchar(0)"); + testReadFromLateBindingView("TIME", "time(6)"); + testReadFromLateBindingView("TIMESTAMP", "timestamp(6)"); + testReadFromLateBindingView("TIMESTAMPTZ", "timestamp(6) with time zone"); + } + + private void testReadFromLateBindingView(String redshiftType, String trinoType) { try (TestView view = new 
TestView(onRemoteDatabase(), TEST_SCHEMA + ".late_schema_binding", "SELECT CAST(NULL AS %s) AS value WITH NO SCHEMA BINDING".formatted(redshiftType))) { assertThat(query("SELECT true FROM %s WHERE value IS NULL".formatted(view.getName()))) @@ -173,8 +189,39 @@ public void testReadFromLateBindingView(String redshiftType, String trinoType) } } - @Test(dataProvider = "testReadNullFromViewDataProvider") - public void testReadNullFromView(String redshiftType, String trinoType, boolean lateBindingView) + @Test + public void testReadNullFromView() + { + testReadNullFromView("SMALLINT", "smallint", true); + testReadNullFromView("SMALLINT", "smallint", false); + testReadNullFromView("INTEGER", "integer", true); + testReadNullFromView("INTEGER", "integer", false); + testReadNullFromView("BIGINT", "bigint", true); + testReadNullFromView("BIGINT", "bigint", false); + testReadNullFromView("DECIMAL", "decimal(18,0)", true); + testReadNullFromView("DECIMAL", "decimal(18,0)", false); + testReadNullFromView("REAL", "real", true); + testReadNullFromView("REAL", "real", false); + testReadNullFromView("DOUBLE PRECISION", "double", true); + testReadNullFromView("DOUBLE PRECISION", "double", false); + testReadNullFromView("BOOLEAN", "boolean", true); + testReadNullFromView("BOOLEAN", "boolean", false); + testReadNullFromView("CHAR(1)", "char(1)", true); + testReadNullFromView("CHAR(1)", "char(1)", false); + testReadNullFromView("VARCHAR(1)", "varchar(1)", true); + testReadNullFromView("VARCHAR(1)", "varchar(1)", false); + // consider to extract "CHARACTER VARYING" type from here as it requires exact length, 0 - is for the empty string + testReadNullFromView("CHARACTER VARYING", "varchar(0)", true); + testReadNullFromView("CHARACTER VARYING", "varchar(0)", false); + testReadNullFromView("TIME", "time(6)", true); + testReadNullFromView("TIME", "time(6)", false); + testReadNullFromView("TIMESTAMP", "timestamp(6)", true); + testReadNullFromView("TIMESTAMP", "timestamp(6)", false); + 
testReadNullFromView("TIMESTAMPTZ", "timestamp(6) with time zone", true); + testReadNullFromView("TIMESTAMPTZ", "timestamp(6) with time zone", false); + } + + private void testReadNullFromView(String redshiftType, String trinoType, boolean lateBindingView) { try (TestView view = new TestView( onRemoteDatabase(), @@ -190,32 +237,6 @@ public void testReadNullFromView(String redshiftType, String trinoType, boolean } } - @DataProvider - public Object[][] testReadNullFromViewDataProvider() - { - return cartesianProduct(redshiftTypeToTrinoTypes(), trueFalse()); - } - - @DataProvider - public Object[][] redshiftTypeToTrinoTypes() - { - return new Object[][] { - {"SMALLINT", "smallint"}, - {"INTEGER", "integer"}, - {"BIGINT", "bigint"}, - {"DECIMAL", "decimal(18,0)"}, - {"REAL", "real"}, - {"DOUBLE PRECISION", "double"}, - {"BOOLEAN", "boolean"}, - {"CHAR(1)", "char(1)"}, - {"VARCHAR(1)", "varchar(1)"}, - // consider to extract "CHARACTER VARYING" type from here as it requires exact length, 0 - is for the empty string - {"CHARACTER VARYING", "varchar(0)"}, - {"TIME", "time(6)"}, - {"TIMESTAMP", "timestamp(6)"}, - {"TIMESTAMPTZ", "timestamp(6) with time zone"}}; - } - @Test public void testRedshiftAddNotNullColumn() { @@ -225,6 +246,7 @@ public void testRedshiftAddNotNullColumn() } } + @Test @Override public void testDelete() { @@ -252,8 +274,18 @@ public void testDelete() } } - @Test(dataProvider = "testCaseColumnNamesDataProvider") - public void testCaseColumnNames(String tableName) + @Test + public void testCaseColumnNames() + { + testCaseColumnNames("TEST_STATS_MIXED_UNQUOTED_UPPER_" + randomNameSuffix()); + testCaseColumnNames("test_stats_mixed_unquoted_lower_" + randomNameSuffix()); + testCaseColumnNames("test_stats_mixed_uNQuoTeD_miXED_" + randomNameSuffix()); + testCaseColumnNames("\"TEST_STATS_MIXED_QUOTED_UPPER_" + randomNameSuffix() + "\""); + testCaseColumnNames("\"test_stats_mixed_quoted_lower_" + randomNameSuffix() + "\""); + 
testCaseColumnNames("\"test_stats_mixed_QuoTeD_miXED_" + randomNameSuffix() + "\""); + } + + private void testCaseColumnNames(String tableName) { try { assertUpdate( @@ -339,19 +371,7 @@ private static void gatherStats(String tableName) }); } - @DataProvider - public Object[][] testCaseColumnNamesDataProvider() - { - return new Object[][] { - {"TEST_STATS_MIXED_UNQUOTED_UPPER_" + randomNameSuffix()}, - {"test_stats_mixed_unquoted_lower_" + randomNameSuffix()}, - {"test_stats_mixed_uNQuoTeD_miXED_" + randomNameSuffix()}, - {"\"TEST_STATS_MIXED_QUOTED_UPPER_" + randomNameSuffix() + "\""}, - {"\"test_stats_mixed_quoted_lower_" + randomNameSuffix() + "\""}, - {"\"test_stats_mixed_QuoTeD_miXED_" + randomNameSuffix() + "\""} - }; - } - + @Test @Override public void testCountDistinctWithStringTypes() { @@ -380,14 +400,17 @@ public void testCountDistinctWithStringTypes() } } + @Test @Override public void testAggregationPushdown() { - throw new SkipException("tested in testAggregationPushdown(String)"); + testAggregationPushdown("EVEN"); + testAggregationPushdown("KEY"); + testAggregationPushdown("ALL"); + testAggregationPushdown("AUTO"); } - @Test(dataProvider = "testAggregationPushdownDistStylesDataProvider") - public void testAggregationPushdown(String distStyle) + private void testAggregationPushdown(String distStyle) { String nation = format("%s.nation_%s_%s", TEST_SCHEMA, distStyle, randomNameSuffix()); String customer = format("%s.customer_%s_%s", TEST_SCHEMA, distStyle, randomNameSuffix()); @@ -461,14 +484,17 @@ public void testAggregationPushdown(String distStyle) } } + @Test @Override public void testNumericAggregationPushdown() { - throw new SkipException("tested in testNumericAggregationPushdown(String)"); + testNumericAggregationPushdown("EVEN"); + testNumericAggregationPushdown("KEY"); + testNumericAggregationPushdown("ALL"); + testNumericAggregationPushdown("AUTO"); } - @Test(dataProvider = "testAggregationPushdownDistStylesDataProvider") - public void 
testNumericAggregationPushdown(String distStyle) + private void testNumericAggregationPushdown(String distStyle) { String schemaName = getSession().getSchema().orElseThrow(); // empty table @@ -542,17 +568,6 @@ private static void copyWithDistStyle(String sourceTableName, String destTableNa } } - @DataProvider - public Object[][] testAggregationPushdownDistStylesDataProvider() - { - return new Object[][] { - {"EVEN"}, - {"KEY"}, - {"ALL"}, - {"AUTO"}, - }; - } - @Test public void testDecimalAvgPushdownForMaximumDecimalScale() { @@ -587,17 +602,18 @@ public void testDecimalAvgPushdownFoShortDecimalScale() } } - @Override @Test + @Override public void testReadMetadataWithRelationsConcurrentModifications() { - throw new SkipException("Test fails with a timeout sometimes and is flaky"); + abort("Test fails with a timeout sometimes and is flaky"); } + @Test @Override public void testInsertRowConcurrently() { - throw new SkipException("Test fails with a timeout sometimes and is flaky"); + abort("Test fails with a timeout sometimes and is flaky"); } @Override @@ -657,6 +673,7 @@ protected SqlExecutor onRemoteDatabase() return RedshiftQueryRunner::executeInRedshift; } + @Test @Override public void testDeleteWithLike() { diff --git a/plugin/trino-singlestore/src/test/java/io/trino/plugin/singlestore/TestSingleStoreConnectorTest.java b/plugin/trino-singlestore/src/test/java/io/trino/plugin/singlestore/TestSingleStoreConnectorTest.java index 7c9243212187..92f8e672279f 100644 --- a/plugin/trino-singlestore/src/test/java/io/trino/plugin/singlestore/TestSingleStoreConnectorTest.java +++ b/plugin/trino-singlestore/src/test/java/io/trino/plugin/singlestore/TestSingleStoreConnectorTest.java @@ -23,9 +23,9 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.SqlExecutor; import io.trino.testing.sql.TestTable; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import 
org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import java.util.Optional; import java.util.OptionalInt; @@ -40,10 +40,13 @@ import static java.util.stream.IntStream.range; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; +@TestInstance(PER_CLASS) public class TestSingleStoreConnectorTest extends BaseJdbcConnectorTest { @@ -57,7 +60,7 @@ protected QueryRunner createQueryRunner() return createSingleStoreQueryRunner(singleStoreServer, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); } - @AfterClass(alwaysRun = true) + @AfterAll public final void destroy() { singleStoreServer.close(); @@ -144,20 +147,23 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } + @Test @Override public void testInsertUnicode() { // SingleStore's utf8 encoding is 3 bytes and truncates strings upon encountering a 4 byte sequence - throw new SkipException("SingleStore doesn't support utf8mb4"); + abort("SingleStore doesn't support utf8mb4"); } + @Test @Override public void testInsertHighestUnicodeCharacter() { // SingleStore's utf8 encoding is 3 bytes and truncates strings upon encountering a 4 byte sequence - throw new SkipException("SingleStore doesn't support utf8mb4"); + abort("SingleStore doesn't support utf8mb4"); } + @Test @Override public void testDeleteWithLike() { @@ -209,6 +215,7 @@ public void testSingleStoreTinyint() } // Overridden because the method from BaseConnectorTest fails on one of the assertions, see TODO below + @Test @Override public void testInsertIntoNotNullColumn() { @@ -253,6 +260,7 @@ public void 
testColumnComment() assertUpdate("DROP TABLE test_column_comment"); } + @Test @Override public void testAddNotNullColumn() { @@ -316,6 +324,7 @@ public void testPredicatePushdown() .isNotFullyPushedDown(AggregationNode.class); } + @Test @Override public void testCreateTableAsSelectNegativeDate() { @@ -333,6 +342,7 @@ public void testInsertNegativeDate() .hasStackTraceContaining("TrinoException: Driver returned null LocalDate for a non-null value"); } + @Test @Override public void testNativeQueryCreateStatement() { @@ -346,6 +356,7 @@ public void testNativeQueryCreateStatement() assertFalse(getQueryRunner().tableExists(getSession(), "numbers")); } + @Test @Override public void testNativeQueryInsertStatementTableExists() { diff --git a/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerConnectorTest.java b/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerConnectorTest.java index aedbd809daef..bc4d2e96a04f 100644 --- a/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerConnectorTest.java +++ b/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerConnectorTest.java @@ -25,9 +25,7 @@ import io.trino.testing.TestingConnectorBehavior; import io.trino.testing.sql.TestTable; import io.trino.testng.services.Flaky; -import org.testng.SkipException; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.List; import java.util.Optional; @@ -43,6 +41,7 @@ import static java.util.stream.Collectors.joining; import static java.util.stream.IntStream.range; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.abort; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; @@ -153,7 +152,7 @@ public void testReadMetadataWithRelationsConcurrentModifications() "Lock request time out period exceeded|" + // E.g. 
system.metadata.table_comments can return empty results, when underlying metadata list tables call fails "Expecting actual not to be empty).*"); - throw new SkipException("to be fixed"); + abort("to be fixed"); } } @@ -468,8 +467,15 @@ public void testShowCreateTable() ")"); } - @Test(dataProvider = "dataCompression") - public void testCreateWithDataCompression(DataCompression dataCompression) + @Test + public void testCreateWithDataCompression() + { + testCreateWithDataCompression(NONE); + testCreateWithDataCompression(ROW); + testCreateWithDataCompression(PAGE); + } + + private void testCreateWithDataCompression(DataCompression dataCompression) { String tableName = "test_create_with_compression_" + randomNameSuffix(); String createQuery = format("CREATE TABLE sqlserver.dbo.%s (\n" + @@ -488,16 +494,6 @@ public void testCreateWithDataCompression(DataCompression dataCompression) assertUpdate("DROP TABLE " + tableName); } - @DataProvider - public Object[][] dataCompression() - { - return new Object[][] { - {NONE}, - {ROW}, - {PAGE} - }; - } - @Test public void testShowCreateForPartitionedTablesWithDataCompression() { @@ -587,6 +583,7 @@ public void testDateYearOfEraPredicate() ".*\\QConversion failed when converting date and/or time from character string.\\E"); } + @Test @Override public void testNativeQuerySimple() { diff --git a/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerTransactionIsolationTest.java b/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerTransactionIsolationTest.java index 859371798c1d..8d7c7a43307a 100644 --- a/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerTransactionIsolationTest.java +++ b/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/BaseSqlServerTransactionIsolationTest.java @@ -17,7 +17,7 @@ import io.trino.testing.MaterializedResult; import io.trino.testing.QueryRunner; import io.trino.testing.sql.SqlExecutor; -import 
org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.List; import java.util.Map; diff --git a/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/TestSqlServerConnectorTest.java b/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/TestSqlServerConnectorTest.java index fb536abb1e89..434da9cc1f69 100644 --- a/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/TestSqlServerConnectorTest.java +++ b/plugin/trino-sqlserver/src/test/java/io/trino/plugin/sqlserver/TestSqlServerConnectorTest.java @@ -22,8 +22,7 @@ import io.trino.testng.services.Flaky; import org.jdbi.v3.core.Handle; import org.jdbi.v3.core.Jdbi; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.sql.Connection; import java.sql.SQLException; @@ -34,9 +33,6 @@ import static io.trino.plugin.sqlserver.SqlServerQueryRunner.createSqlServerQueryRunner; import static io.trino.plugin.sqlserver.SqlServerSessionProperties.BULK_COPY_FOR_WRITE; import static io.trino.plugin.sqlserver.SqlServerSessionProperties.BULK_COPY_FOR_WRITE_LOCK_DESTINATION_TABLE; -import static io.trino.testing.DataProviders.cartesianProduct; -import static io.trino.testing.DataProviders.toDataProvider; -import static io.trino.testing.DataProviders.trueFalse; import static io.trino.testing.TestingNames.randomNameSuffix; import static java.lang.String.format; import static java.util.Locale.ENGLISH; @@ -69,8 +65,17 @@ protected SqlExecutor onRemoteDatabase() } @Flaky(issue = "fn_dblog() returns information only about the active portion of the transaction log, therefore it is flaky", match = ".*") - @Test(dataProvider = "doubleTrueFalse") - public void testCreateTableAsSelectWriteBulkiness(boolean bulkCopyForWrite, boolean bulkCopyLock) + @Test + public void testCreateTableAsSelectWriteBulkiness() + throws SQLException + { + testCreateTableAsSelectWriteBulkiness(true, true); + 
testCreateTableAsSelectWriteBulkiness(true, false); + testCreateTableAsSelectWriteBulkiness(false, true); + testCreateTableAsSelectWriteBulkiness(false, false); + } + + private void testCreateTableAsSelectWriteBulkiness(boolean bulkCopyForWrite, boolean bulkCopyLock) throws SQLException { String table = "bulk_copy_ctas_" + randomNameSuffix(); @@ -98,8 +103,21 @@ public void testCreateTableAsSelectWriteBulkiness(boolean bulkCopyForWrite, bool } @Flaky(issue = "fn_dblog() returns information only about the active portion of the transaction log, therefore it is flaky", match = ".*") - @Test(dataProvider = "tripleTrueFalse") - public void testInsertWriteBulkiness(boolean nonTransactionalInsert, boolean bulkCopyForWrite, boolean bulkCopyForWriteLockDestinationTable) + @Test + public void testInsertWriteBulkiness() + throws SQLException + { + testInsertWriteBulkiness(true, true, true); + testInsertWriteBulkiness(true, true, false); + testInsertWriteBulkiness(true, false, true); + testInsertWriteBulkiness(true, false, false); + testInsertWriteBulkiness(false, true, true); + testInsertWriteBulkiness(false, true, false); + testInsertWriteBulkiness(false, false, true); + testInsertWriteBulkiness(false, false, false); + } + + private void testInsertWriteBulkiness(boolean nonTransactionalInsert, boolean bulkCopyForWrite, boolean bulkCopyForWriteLockDestinationTable) throws SQLException { String table = "bulk_copy_insert_" + randomNameSuffix(); @@ -128,8 +146,17 @@ public void testInsertWriteBulkiness(boolean nonTransactionalInsert, boolean bul assertUpdate("DROP TABLE " + table); } - @Test(dataProvider = "timestampTypes") - public void testInsertWriteBulkinessWithTimestamps(String timestampType) + @Test + public void testInsertWriteBulkinessWithTimestamps() + { + testInsertWriteBulkinessWithTimestamps("timestamp"); + testInsertWriteBulkinessWithTimestamps("timestamp(3)"); + testInsertWriteBulkinessWithTimestamps("timestamp(6)"); + 
testInsertWriteBulkinessWithTimestamps("timestamp(9)"); + testInsertWriteBulkinessWithTimestamps("timestamp(12)"); + } + + private void testInsertWriteBulkinessWithTimestamps(String timestampType) { Session session = Session.builder(getSession()) .setCatalogSessionProperty(CATALOG, BULK_COPY_FOR_WRITE, "true") @@ -162,51 +189,57 @@ public void testInsertWriteBulkinessWithTimestamps(String timestampType) } // TODO move test to BaseConnectorTest https://github.com/trinodb/trino/issues/14517 - @Test(dataProvider = "testTableNameDataProvider") - public void testCreateAndDropTableWithSpecialCharacterName(String tableName) + @Test + public void testCreateAndDropTableWithSpecialCharacterName() { - String tableNameInSql = "\"" + tableName.replace("\"", "\"\"") + "\""; - // Until https://github.com/trinodb/trino/issues/17 the table name is effectively lowercase - tableName = tableName.toLowerCase(ENGLISH); - assertUpdate("CREATE TABLE " + tableNameInSql + " (a bigint, b double, c varchar(50))"); - assertTrue(getQueryRunner().tableExists(getSession(), tableName)); - assertTableColumnNames(tableNameInSql, "a", "b", "c"); - - assertUpdate("DROP TABLE " + tableNameInSql); - assertFalse(getQueryRunner().tableExists(getSession(), tableName)); + for (String tableName : testTableNameTestData()) { + String tableNameInSql = "\"" + tableName.replace("\"", "\"\"") + "\""; + // Until https://github.com/trinodb/trino/issues/17 the table name is effectively lowercase + tableName = tableName.toLowerCase(ENGLISH); + assertUpdate("CREATE TABLE " + tableNameInSql + " (a bigint, b double, c varchar(50))"); + assertTrue(getQueryRunner().tableExists(getSession(), tableName)); + assertTableColumnNames(tableNameInSql, "a", "b", "c"); + + assertUpdate("DROP TABLE " + tableNameInSql); + assertFalse(getQueryRunner().tableExists(getSession(), tableName)); + } } // TODO remove this test after https://github.com/trinodb/trino/issues/14517 - @Test(dataProvider = "testTableNameDataProvider") - public void 
testRenameColumnNameAdditionalTests(String columnName) + @Test + public void testRenameColumnNameAdditionalTests() { - String nameInSql = "\"" + columnName.replace("\"", "\"\"") + "\""; - String tableName = "tcn_" + nameInSql.replaceAll("[^a-z0-9]", "") + randomNameSuffix(); - // Use complex identifier to test a source column name when renaming columns - String sourceColumnName = "a;b$c"; + for (String columnName : testTableNameTestData()) { + String nameInSql = "\"" + columnName.replace("\"", "\"\"") + "\""; + String tableName = "tcn_" + nameInSql.replaceAll("[^a-z0-9]", "") + randomNameSuffix(); + // Use complex identifier to test a source column name when renaming columns + String sourceColumnName = "a;b$c"; - assertUpdate("CREATE TABLE " + tableName + "(\"" + sourceColumnName + "\" varchar(50))"); - assertTableColumnNames(tableName, sourceColumnName); + assertUpdate("CREATE TABLE " + tableName + "(\"" + sourceColumnName + "\" varchar(50))"); + assertTableColumnNames(tableName, sourceColumnName); - assertUpdate("ALTER TABLE " + tableName + " RENAME COLUMN \"" + sourceColumnName + "\" TO " + nameInSql); - assertTableColumnNames(tableName, columnName.toLowerCase(ENGLISH)); + assertUpdate("ALTER TABLE " + tableName + " RENAME COLUMN \"" + sourceColumnName + "\" TO " + nameInSql); + assertTableColumnNames(tableName, columnName.toLowerCase(ENGLISH)); - assertUpdate("DROP TABLE " + tableName); + assertUpdate("DROP TABLE " + tableName); + } } // TODO move this test to BaseConnectorTest https://github.com/trinodb/trino/issues/14517 - @Test(dataProvider = "testTableNameDataProvider") - public void testRenameFromToTableWithSpecialCharacterName(String tableName) + @Test + public void testRenameFromToTableWithSpecialCharacterName() { - String tableNameInSql = "\"" + tableName.replace("\"", "\"\"") + "\""; - String sourceTableName = "test_rename_source_" + randomNameSuffix(); - assertUpdate("CREATE TABLE " + sourceTableName + " AS SELECT 123 x", 1); - - assertUpdate("ALTER 
TABLE " + sourceTableName + " RENAME TO " + tableNameInSql); - assertQuery("SELECT x FROM " + tableNameInSql, "VALUES 123"); - // test rename back is working properly - assertUpdate("ALTER TABLE " + tableNameInSql + " RENAME TO " + sourceTableName); - assertUpdate("DROP TABLE " + sourceTableName); + for (String tableName : testTableNameTestData()) { + String tableNameInSql = "\"" + tableName.replace("\"", "\"\"") + "\""; + String sourceTableName = "test_rename_source_" + randomNameSuffix(); + assertUpdate("CREATE TABLE " + sourceTableName + " AS SELECT 123 x", 1); + + assertUpdate("ALTER TABLE " + sourceTableName + " RENAME TO " + tableNameInSql); + assertQuery("SELECT x FROM " + tableNameInSql, "VALUES 123"); + // test rename back is working properly + assertUpdate("ALTER TABLE " + tableNameInSql + " RENAME TO " + sourceTableName); + assertUpdate("DROP TABLE " + sourceTableName); + } } private int getTableOperationsCount(String operation, String table) @@ -229,41 +262,6 @@ private int getTableOperationsCount(String operation, String table) } } - @DataProvider - public static Object[][] doubleTrueFalse() - { - return cartesianProduct(trueFalse(), trueFalse()); - } - - @DataProvider - public static Object[][] tripleTrueFalse() - { - return cartesianProduct(trueFalse(), trueFalse(), trueFalse()); - } - - @DataProvider - public static Object[][] timestampTypes() - { - // Timestamp with timezone is not supported by the SqlServer connector - return new Object[][] { - {"timestamp"}, - {"timestamp(3)"}, - {"timestamp(6)"}, - {"timestamp(9)"}, - {"timestamp(12)"} - }; - } - - // TODO replace TableNameDataProvider and ColumnNameDataProvider with ObjectNameDataProvider - // to one big single list of all special character cases, current list has additional special bracket cases, - // please don't forget to use this list as base - @DataProvider - public Object[][] testTableNameDataProvider() - { - return testTableNameTestData().stream() - .collect(toDataProvider()); - } - 
private List testTableNameTestData() { return ImmutableList.builder() diff --git a/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/hive/TestHiveFaultTolerantExecutionConnectorTest.java b/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/hive/TestHiveFaultTolerantExecutionConnectorTest.java index b79ba9ff5b71..4874d865742f 100644 --- a/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/hive/TestHiveFaultTolerantExecutionConnectorTest.java +++ b/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/hive/TestHiveFaultTolerantExecutionConnectorTest.java @@ -20,8 +20,9 @@ import io.trino.plugin.hive.BaseHiveConnectorTest; import io.trino.plugin.hive.HiveQueryRunner; import io.trino.testing.QueryRunner; -import org.testng.annotations.AfterClass; -import org.testng.annotations.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import static io.airlift.units.DataSize.Unit.GIGABYTE; import static io.trino.SystemSessionProperties.FAULT_TOLERANT_EXECUTION_MAX_PARTITION_COUNT; @@ -29,7 +30,9 @@ import static io.trino.plugin.exchange.filesystem.containers.MinioStorage.getExchangeManagerProperties; import static io.trino.testing.FaultTolerantExecutionConnectorTestHelper.getExtraProperties; import static io.trino.testing.TestingNames.randomNameSuffix; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; +@TestInstance(PER_CLASS) public class TestHiveFaultTolerantExecutionConnectorTest extends BaseHiveConnectorTest { @@ -50,12 +53,14 @@ protected QueryRunner createQueryRunner() })); } + @Test @Override public void testMultipleWriters() { // Not applicable for fault-tolerant mode. 
} + @Test @Override public void testMultipleWritersWithSkewedData() { @@ -65,6 +70,7 @@ public void testMultipleWritersWithSkewedData() // We need to override this method because in the case of pipeline execution, // the default number of writers are equal to worker count. Whereas, in the // fault-tolerant execution, it starts with 1. + @Test @Override public void testTaskWritersDoesNotScaleWithLargeMinWriterSize() { @@ -72,24 +78,28 @@ public void testTaskWritersDoesNotScaleWithLargeMinWriterSize() .isEqualTo(1); } + @Test @Override - public void testWriterTasksCountLimitUnpartitioned(boolean scaleWriters, boolean redistributeWrites, int expectedFilesCount) + public void testWriterTasksCountLimitUnpartitioned() { // Not applicable for fault-tolerant mode. } + @Test @Override public void testWriterTasksCountLimitPartitionedScaleWritersDisabled() { // Not applicable for fault-tolerant mode. } + @Test @Override public void testWriterTasksCountLimitPartitionedScaleWritersEnabled() { // Not applicable for fault-tolerant mode. 
} + @Test @Override public void testWritersAcrossMultipleWorkersWhenScaleWritersIsEnabled() { @@ -106,7 +116,7 @@ public void testMaxOutputPartitionCountCheck() assertQueryFails(session, "SELECT nationkey, count(*) FROM nation GROUP BY nationkey", "Max number of output partitions exceeded for exchange.*"); } - @AfterClass(alwaysRun = true) + @AfterAll public void destroy() throws Exception { diff --git a/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/iceberg/TestIcebergParquetFaultTolerantExecutionConnectorTest.java b/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/iceberg/TestIcebergParquetFaultTolerantExecutionConnectorTest.java index 6bb86779e0ae..32d6c5d0a2db 100644 --- a/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/iceberg/TestIcebergParquetFaultTolerantExecutionConnectorTest.java +++ b/testing/trino-faulttolerant-tests/src/test/java/io/trino/faulttolerant/iceberg/TestIcebergParquetFaultTolerantExecutionConnectorTest.java @@ -17,15 +17,19 @@ import io.trino.plugin.exchange.filesystem.containers.MinioStorage; import io.trino.plugin.iceberg.IcebergQueryRunner; import io.trino.plugin.iceberg.TestIcebergParquetConnectorTest; -import org.testng.SkipException; -import org.testng.annotations.AfterClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import static io.trino.plugin.exchange.filesystem.containers.MinioStorage.getExchangeManagerProperties; import static io.trino.plugin.iceberg.IcebergTestUtils.checkParquetFileSorting; import static io.trino.testing.FaultTolerantExecutionConnectorTestHelper.getExtraProperties; import static io.trino.testing.TestingNames.randomNameSuffix; import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; +@TestInstance(PER_CLASS) public class 
TestIcebergParquetFaultTolerantExecutionConnectorTest extends TestIcebergParquetConnectorTest { @@ -45,27 +49,30 @@ protected IcebergQueryRunner.Builder createQueryRunnerBuilder() }); } + @Test @Override public void testSplitPruningForFilterOnPartitionColumn() { // TODO: figure out why assertThatThrownBy(super::testSplitPruningForFilterOnPartitionColumn) .hasMessageContaining("Couldn't find operator summary, probably due to query statistic collection error"); - throw new SkipException("fails currently on FTE"); + abort("fails currently on FTE"); } + @Test @Override public void testStatsBasedRepartitionDataOnCtas() { // TODO: figure out why - throw new SkipException("We always get 3 partitions with FTE"); + abort("We always get 3 partitions with FTE"); } + @Test @Override public void testStatsBasedRepartitionDataOnInsert() { // TODO: figure out why - throw new SkipException("We always get 3 partitions with FTE"); + abort("We always get 3 partitions with FTE"); } @Override @@ -74,7 +81,7 @@ protected boolean isFileSorted(String path, String sortColumnName) return checkParquetFileSorting(path, sortColumnName); } - @AfterClass(alwaysRun = true) + @AfterAll public void destroy() throws Exception { diff --git a/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java b/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java index b7695f780e1e..02ae0aaeeac4 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java @@ -47,11 +47,12 @@ import io.trino.testing.sql.TestView; import io.trino.tpch.TpchTable; import org.intellij.lang.annotations.Language; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.RepeatedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.parallel.Isolated; -import 
org.testng.SkipException; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; -import org.testng.annotations.Test; import java.time.Instant; import java.time.ZonedDateTime; @@ -104,7 +105,6 @@ import static io.trino.sql.planner.assertions.PlanMatchPattern.tableScan; import static io.trino.sql.planner.optimizations.PlanNodeSearcher.searchFrom; import static io.trino.sql.planner.planprinter.PlanPrinter.textLogicalPlan; -import static io.trino.testing.DataProviders.toDataProvider; import static io.trino.testing.MaterializedResult.resultBuilder; import static io.trino.testing.QueryAssertions.assertContains; import static io.trino.testing.QueryAssertions.assertEqualsIgnoreOrder; @@ -176,6 +176,8 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.InstanceOfAssertFactories.ZONED_DATE_TIME; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; @@ -186,6 +188,7 @@ * Generic test for connectors. */ @Isolated +@TestInstance(PER_CLASS) public abstract class BaseConnectorTest extends AbstractTestQueries { @@ -198,7 +201,7 @@ public abstract class BaseConnectorTest private final ConcurrentMap>> mockTableListings = new ConcurrentHashMap<>(); - @BeforeClass + @BeforeAll public void initMockCatalog() { QueryRunner queryRunner = getQueryRunner(); @@ -743,23 +746,18 @@ protected void verifyVersionedQueryFailurePermissible(Exception e) /** * Test interactions between optimizer (including CBO), scheduling and connector metadata APIs. 
*/ - @Test(dataProvider = "joinDistributionTypes") - public void testJoinWithEmptySides(JoinDistributionType joinDistributionType) - { - Session session = noJoinReordering(joinDistributionType); - // empty build side - assertQuery(session, "SELECT count(*) FROM nation JOIN region ON nation.regionkey = region.regionkey AND region.name = ''", "VALUES 0"); - assertQuery(session, "SELECT count(*) FROM nation JOIN region ON nation.regionkey = region.regionkey AND region.regionkey < 0", "VALUES 0"); - // empty probe side - assertQuery(session, "SELECT count(*) FROM region JOIN nation ON nation.regionkey = region.regionkey AND region.name = ''", "VALUES 0"); - assertQuery(session, "SELECT count(*) FROM nation JOIN region ON nation.regionkey = region.regionkey AND region.regionkey < 0", "VALUES 0"); - } - - @DataProvider - public Object[][] joinDistributionTypes() + @Test + public void testJoinWithEmptySides() { - return Stream.of(JoinDistributionType.values()) - .collect(toDataProvider()); + for (JoinDistributionType joinDistributionType : JoinDistributionType.values()) { + Session session = noJoinReordering(joinDistributionType); + // empty build side + assertQuery(session, "SELECT count(*) FROM nation JOIN region ON nation.regionkey = region.regionkey AND region.name = ''", "VALUES 0"); + assertQuery(session, "SELECT count(*) FROM nation JOIN region ON nation.regionkey = region.regionkey AND region.regionkey < 0", "VALUES 0"); + // empty probe side + assertQuery(session, "SELECT count(*) FROM region JOIN nation ON nation.regionkey = region.regionkey AND region.name = ''", "VALUES 0"); + assertQuery(session, "SELECT count(*) FROM nation JOIN region ON nation.regionkey = region.regionkey AND region.regionkey < 0", "VALUES 0"); + } } /** @@ -1562,8 +1560,14 @@ public void testFederatedMaterializedViewWithGracePeriod() }); } - @Test(dataProviderClass = DataProviders.class, dataProvider = "trueFalse") - public void testMaterializedViewBaseTableGone(boolean initialized) + 
@Test + public void testMaterializedViewBaseTableGone() + { + testMaterializedViewBaseTableGone(true); + testMaterializedViewBaseTableGone(false); + } + + private void testMaterializedViewBaseTableGone(boolean initialized) { skipTestUnless(hasBehavior(SUPPORTS_CREATE_MATERIALIZED_VIEW)); @@ -1689,8 +1693,15 @@ public void testCompatibleTypeChangeForView2() assertUpdate("DROP TABLE " + tableName); } - @Test(dataProvider = "testViewMetadataDataProvider") - public void testViewMetadata(String securityClauseInCreate, String securityClauseInShowCreate) + @Test + public void testViewMetadata() + { + testViewMetadata("", "DEFINER"); + testViewMetadata(" SECURITY DEFINER", "DEFINER"); + testViewMetadata(" SECURITY INVOKER", "INVOKER"); + } + + private void testViewMetadata(String securityClauseInCreate, String securityClauseInShowCreate) { skipTestUnless(hasBehavior(SUPPORTS_CREATE_VIEW)); @@ -1763,16 +1774,6 @@ public void testViewMetadata(String securityClauseInCreate, String securityClaus assertUpdate("DROP VIEW " + viewName); } - @DataProvider - public static Object[][] testViewMetadataDataProvider() - { - return new Object[][] { - {"", "DEFINER"}, - {" SECURITY DEFINER", "DEFINER"}, - {" SECURITY INVOKER", "INVOKER"}, - }; - } - @Test public void testShowCreateView() { @@ -1973,12 +1974,13 @@ public void testViewAndMaterializedViewTogether() * Test that reading table, column metadata, like {@code SHOW TABLES} or reading from {@code information_schema.views} * does not fail when relations are concurrently created or dropped. 
*/ - @Test(timeOut = 180_000) + @Test + @Timeout(180) public void testReadMetadataWithRelationsConcurrentModifications() throws Exception { if (!hasBehavior(SUPPORTS_CREATE_TABLE) && !hasBehavior(SUPPORTS_CREATE_VIEW) && !hasBehavior(SUPPORTS_CREATE_MATERIALIZED_VIEW)) { - throw new SkipException("Cannot test"); + abort("Cannot test"); } int readIterations = 5; @@ -2398,7 +2400,7 @@ public void testRenameSchema() } if (!hasBehavior(SUPPORTS_CREATE_SCHEMA)) { - throw new SkipException("Skipping as connector does not support CREATE SCHEMA"); + abort("Skipping as connector does not support CREATE SCHEMA"); } String schemaName = "test_rename_schema_" + randomNameSuffix(); @@ -2934,42 +2936,44 @@ public void testSetColumnType() } } - @Test(dataProvider = "setColumnTypesDataProvider") - public void testSetColumnTypes(SetColumnTypeSetup setup) + @Test + public void testSetColumnTypes() { skipTestUnless(hasBehavior(SUPPORTS_SET_COLUMN_TYPE) && hasBehavior(SUPPORTS_CREATE_TABLE_WITH_DATA)); - TestTable table; - try { - table = new TestTable(getQueryRunner()::execute, "test_set_column_type_", " AS SELECT CAST(" + setup.sourceValueLiteral + " AS " + setup.sourceColumnType + ") AS col"); - } - catch (Exception e) { - verifyUnsupportedTypeException(e, setup.sourceColumnType); - throw new SkipException("Unsupported column type: " + setup.sourceColumnType); - } - try (table) { - Runnable setColumnType = () -> assertUpdate("ALTER TABLE " + table.getName() + " ALTER COLUMN col SET DATA TYPE " + setup.newColumnType); - if (setup.unsupportedType) { - assertThatThrownBy(setColumnType::run) - .satisfies(this::verifySetColumnTypeFailurePermissible); + for (SetColumnTypeSetup setup : setColumnTypesDataProvider()) { + TestTable table; + try { + table = new TestTable(getQueryRunner()::execute, "test_set_column_type_", " AS SELECT CAST(" + setup.sourceValueLiteral + " AS " + setup.sourceColumnType + ") AS col"); + } + catch (Exception e) { + verifyUnsupportedTypeException(e, 
setup.sourceColumnType); + abort("Unsupported column type: " + setup.sourceColumnType); return; } - setColumnType.run(); + try (table) { + Runnable setColumnType = () -> assertUpdate("ALTER TABLE " + table.getName() + " ALTER COLUMN col SET DATA TYPE " + setup.newColumnType); + if (setup.unsupportedType) { + assertThatThrownBy(setColumnType::run) + .satisfies(this::verifySetColumnTypeFailurePermissible); + return; + } + setColumnType.run(); - assertEquals(getColumnType(table.getName(), "col"), setup.newColumnType); - assertThat(query("SELECT * FROM " + table.getName())) - .skippingTypesCheck() - .matches("SELECT " + setup.newValueLiteral); + assertEquals(getColumnType(table.getName(), "col"), setup.newColumnType); + assertThat(query("SELECT * FROM " + table.getName())) + .skippingTypesCheck() + .matches("SELECT " + setup.newValueLiteral); + } } } - @DataProvider - public Object[][] setColumnTypesDataProvider() + private List setColumnTypesDataProvider() { return setColumnTypeSetupData().stream() .map(this::filterSetColumnTypesDataProvider) .flatMap(Optional::stream) - .collect(toDataProvider()); + .collect(toList()); } protected Optional filterSetColumnTypesDataProvider(SetColumnTypeSetup setup) @@ -3139,45 +3143,47 @@ public void testSetFieldType() } } - @Test(dataProvider = "setFieldTypesDataProvider") - public void testSetFieldTypes(SetColumnTypeSetup setup) + @Test + public void testSetFieldTypes() { skipTestUnless(hasBehavior(SUPPORTS_SET_FIELD_TYPE) && hasBehavior(SUPPORTS_CREATE_TABLE_WITH_DATA)); - TestTable table; - try { - table = new TestTable( - getQueryRunner()::execute, - "test_set_field_type_", - " AS SELECT CAST(row(" + setup.sourceValueLiteral + ") AS row(field " + setup.sourceColumnType + ")) AS col"); - } - catch (Exception e) { - verifyUnsupportedTypeException(e, setup.sourceColumnType); - throw new SkipException("Unsupported column type: " + setup.sourceColumnType); - } - try (table) { - Runnable setFieldType = () -> assertUpdate("ALTER TABLE " 
+ table.getName() + " ALTER COLUMN col.field SET DATA TYPE " + setup.newColumnType); - if (setup.unsupportedType) { - assertThatThrownBy(setFieldType::run) - .satisfies(this::verifySetFieldTypeFailurePermissible); + for (SetColumnTypeSetup setup : setFieldTypesDataProvider()) { + TestTable table; + try { + table = new TestTable( + getQueryRunner()::execute, + "test_set_field_type_", + " AS SELECT CAST(row(" + setup.sourceValueLiteral + ") AS row(field " + setup.sourceColumnType + ")) AS col"); + } + catch (Exception e) { + verifyUnsupportedTypeException(e, setup.sourceColumnType); + abort("Unsupported column type: " + setup.sourceColumnType); return; } - setFieldType.run(); + try (table) { + Runnable setFieldType = () -> assertUpdate("ALTER TABLE " + table.getName() + " ALTER COLUMN col.field SET DATA TYPE " + setup.newColumnType); + if (setup.unsupportedType) { + assertThatThrownBy(setFieldType::run) + .satisfies(this::verifySetFieldTypeFailurePermissible); + return; + } + setFieldType.run(); - assertEquals(getColumnType(table.getName(), "col"), "row(field " + setup.newColumnType + ")"); - assertThat(query("SELECT * FROM " + table.getName())) - .skippingTypesCheck() - .matches("SELECT row(" + setup.newValueLiteral + ")"); + assertEquals(getColumnType(table.getName(), "col"), "row(field " + setup.newColumnType + ")"); + assertThat(query("SELECT * FROM " + table.getName())) + .skippingTypesCheck() + .matches("SELECT row(" + setup.newValueLiteral + ")"); + } } } - @DataProvider - public Object[][] setFieldTypesDataProvider() + public List setFieldTypesDataProvider() { return setColumnTypeSetupData().stream() .map(this::filterSetFieldTypesDataProvider) .flatMap(Optional::stream) - .collect(toDataProvider()); + .collect(toList()); } protected Optional filterSetFieldTypesDataProvider(SetColumnTypeSetup setup) @@ -3968,7 +3974,7 @@ public void testRenameTableAcrossSchema() { if (!hasBehavior(SUPPORTS_RENAME_TABLE_ACROSS_SCHEMAS)) { if 
(!hasBehavior(SUPPORTS_RENAME_TABLE)) { - throw new SkipException("Skipping since rename table is not supported at all"); + abort("Skipping since rename table is not supported at all"); } assertQueryFails("ALTER TABLE nation RENAME TO other_schema.yyyy", "This connector does not support renaming tables across schemas"); return; @@ -4099,7 +4105,7 @@ public void testCommentView() } return; } - throw new SkipException("Skipping as connector does not support CREATE VIEW"); + abort("Skipping as connector does not support CREATE VIEW"); } String catalogName = getSession().getCatalog().orElseThrow(); @@ -4162,15 +4168,14 @@ public void testCommentColumn() } } - @Test(dataProvider = "testColumnNameDataProvider") - public void testCommentColumnName(String columnName) + @Test + public void testCommentColumnName() { skipTestUnless(hasBehavior(SUPPORTS_COMMENT_ON_COLUMN)); - if (!requiresDelimiting(columnName)) { - testCommentColumnName(columnName, false); + for (String columnName : testColumnNameDataProvider()) { + testCommentColumnName(columnName, requiresDelimiting(columnName)); } - testCommentColumnName(columnName, true); } protected void testCommentColumnName(String columnName, boolean delimited) @@ -4201,7 +4206,7 @@ public void testCommentViewColumn() } return; } - throw new SkipException("Skipping as connector does not support CREATE VIEW"); + abort("Skipping as connector does not support CREATE VIEW"); } String viewColumnName = "regionkey"; @@ -4362,7 +4367,7 @@ public void testInsertArray() assertThatThrownBy(() -> query("CREATE TABLE " + tableName + " (a array(bigint))")) // TODO Unify failure message across connectors .hasMessageMatching("[Uu]nsupported (column )?type: \\Qarray(bigint)"); - throw new SkipException("not supported"); + abort("not supported"); } try (TestTable table = new TestTable(getQueryRunner()::execute, "test_insert_array_", "(a ARRAY, b ARRAY)")) { @@ -4831,7 +4836,8 @@ public void testRowLevelUpdate() } // Repeat test with invocationCount for 
better test coverage, since the tested aspect is inherently non-deterministic. - @Test(timeOut = 60_000, invocationCount = 4) + @RepeatedTest(4) + @Timeout(60) public void testUpdateRowConcurrently() throws Exception { @@ -4897,7 +4903,8 @@ protected void verifyConcurrentUpdateFailurePermissible(Exception e) } // Repeat test with invocationCount for better test coverage, since the tested aspect is inherently non-deterministic. - @Test(timeOut = 60_000, invocationCount = 4) + @RepeatedTest(4) + @Timeout(60) public void testInsertRowConcurrently() throws Exception { @@ -4965,7 +4972,8 @@ protected void verifyConcurrentInsertFailurePermissible(Exception e) } // Repeat test with invocationCount for better test coverage, since the tested aspect is inherently non-deterministic. - @Test(timeOut = 60_000, invocationCount = 4) + @RepeatedTest(4) + @Timeout(60) public void testAddColumnConcurrently() throws Exception { @@ -5030,7 +5038,8 @@ protected void verifyConcurrentAddColumnFailurePermissible(Exception e) } // Repeat test with invocationCount for better test coverage, since the tested aspect is inherently non-deterministic. 
- @Test(timeOut = 60_000, invocationCount = 4) + @RepeatedTest(4) + @Timeout(60) public void testCreateOrReplaceTableConcurrently() throws Exception { @@ -5370,15 +5379,14 @@ public void testNoDataSystemTable() assertQueryFails("TABLE \"nation$data\"", "line 1:1: Table '\\w+.\\w+.\"nation\\$data\"' does not exist"); } - @Test(dataProvider = "testColumnNameDataProvider") - public void testColumnName(String columnName) + @Test + public void testColumnName() { skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE)); - if (!requiresDelimiting(columnName)) { - testColumnName(columnName, false); + for (String columnName : testColumnNameDataProvider()) { + testColumnName(columnName, requiresDelimiting(columnName)); } - testColumnName(columnName, true); } protected void testColumnName(String columnName, boolean delimited) @@ -5415,15 +5423,14 @@ protected void testColumnName(String columnName, boolean delimited) } } - @Test(dataProvider = "testColumnNameDataProvider") - public void testAddAndDropColumnName(String columnName) + @Test + public void testAddAndDropColumnName() { skipTestUnless(hasBehavior(SUPPORTS_ADD_COLUMN) && hasBehavior(SUPPORTS_DROP_COLUMN)); - if (!requiresDelimiting(columnName)) { - testAddAndDropColumnName(columnName, false); + for (String columnName : testColumnNameDataProvider()) { + testAddAndDropColumnName(columnName, requiresDelimiting(columnName)); } - testAddAndDropColumnName(columnName, true); } protected void testAddAndDropColumnName(String columnName, boolean delimited) @@ -5460,15 +5467,14 @@ protected String createTableSqlForAddingAndDroppingColumn(String tableName, Stri return "CREATE TABLE " + tableName + "(" + columnNameInSql + " varchar(50), value varchar(50))"; } - @Test(dataProvider = "testColumnNameDataProvider") - public void testRenameColumnName(String columnName) + @Test + public void testRenameColumnName() { skipTestUnless(hasBehavior(SUPPORTS_RENAME_COLUMN)); - if (!requiresDelimiting(columnName)) { - testRenameColumnName(columnName, 
false); + for (String columnName : testColumnNameDataProvider()) { + testRenameColumnName(columnName, requiresDelimiting(columnName)); } - testRenameColumnName(columnName, true); } protected void testRenameColumnName(String columnName, boolean delimited) @@ -5515,14 +5521,13 @@ protected static boolean requiresDelimiting(String identifierName) return !identifierName.matches("[a-zA-Z][a-zA-Z0-9_]*"); } - @DataProvider - public Object[][] testColumnNameDataProvider() + public List testColumnNameDataProvider() { return testColumnNameTestData().stream() .map(this::filterColumnNameTestData) .filter(Optional::isPresent) .map(Optional::get) - .collect(toDataProvider()); + .collect(toList()); } private List testColumnNameTestData() @@ -5561,8 +5566,21 @@ protected String dataMappingTableName(String trinoTypeName) return "test_data_mapping_smoke_" + trinoTypeName.replaceAll("[^a-zA-Z0-9]", "_") + randomNameSuffix(); } - @Test(dataProvider = "testCommentDataProvider") - public void testCreateTableWithTableCommentSpecialCharacter(String comment) + @Test + public void testCreateTableWithTableCommentSpecialCharacter() + { + testCreateTableWithTableCommentSpecialCharacter("a;semicolon"); + testCreateTableWithTableCommentSpecialCharacter("an@at"); + testCreateTableWithTableCommentSpecialCharacter("a\"quote"); + testCreateTableWithTableCommentSpecialCharacter("an'apostrophe"); + testCreateTableWithTableCommentSpecialCharacter("a`backtick`"); + testCreateTableWithTableCommentSpecialCharacter("a/slash"); + testCreateTableWithTableCommentSpecialCharacter("a\\backslash"); + testCreateTableWithTableCommentSpecialCharacter("a?question"); + testCreateTableWithTableCommentSpecialCharacter("[square bracket]"); + } + + protected void testCreateTableWithTableCommentSpecialCharacter(String comment) { skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT)); @@ -5571,8 +5589,21 @@ public void testCreateTableWithTableCommentSpecialCharacter(String comment) } } - @Test(dataProvider 
= "testCommentDataProvider") - public void testCreateTableAsSelectWithTableCommentSpecialCharacter(String comment) + @Test + public void testCreateTableAsSelectWithTableCommentSpecialCharacter() + { + testCreateTableAsSelectWithTableCommentSpecialCharacter("a;semicolon"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("an@at"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("a\"quote"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("an'apostrophe"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("a`backtick`"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("a/slash"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("a\\backslash"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("a?question"); + testCreateTableAsSelectWithTableCommentSpecialCharacter("[square bracket]"); + } + + private void testCreateTableAsSelectWithTableCommentSpecialCharacter(String comment) { skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE_WITH_DATA) && hasBehavior(SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT)); @@ -5581,8 +5612,21 @@ public void testCreateTableAsSelectWithTableCommentSpecialCharacter(String comme } } - @Test(dataProvider = "testCommentDataProvider") - public void testCreateTableWithColumnCommentSpecialCharacter(String comment) + @Test + public void testCreateTableWithColumnCommentSpecialCharacter() + { + testCreateTableWithColumnCommentSpecialCharacter("a;semicolon"); + testCreateTableWithColumnCommentSpecialCharacter("an@at"); + testCreateTableWithColumnCommentSpecialCharacter("a\"quote"); + testCreateTableWithColumnCommentSpecialCharacter("an'apostrophe"); + testCreateTableWithColumnCommentSpecialCharacter("a`backtick`"); + testCreateTableWithColumnCommentSpecialCharacter("a/slash"); + testCreateTableWithColumnCommentSpecialCharacter("a\\backslash"); + testCreateTableWithColumnCommentSpecialCharacter("a?question"); + testCreateTableWithColumnCommentSpecialCharacter("[square bracket]"); + } + + private 
void testCreateTableWithColumnCommentSpecialCharacter(String comment) { skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE_WITH_COLUMN_COMMENT)); @@ -5591,8 +5635,21 @@ public void testCreateTableWithColumnCommentSpecialCharacter(String comment) } } - @Test(dataProvider = "testCommentDataProvider") - public void testAddColumnWithCommentSpecialCharacter(String comment) + @Test + public void testAddColumnWithCommentSpecialCharacter() + { + testAddColumnWithCommentSpecialCharacter("a;semicolon"); + testAddColumnWithCommentSpecialCharacter("an@at"); + testAddColumnWithCommentSpecialCharacter("a\"quote"); + testAddColumnWithCommentSpecialCharacter("an'apostrophe"); + testAddColumnWithCommentSpecialCharacter("a`backtick`"); + testAddColumnWithCommentSpecialCharacter("a/slash"); + testAddColumnWithCommentSpecialCharacter("a\\backslash"); + testAddColumnWithCommentSpecialCharacter("a?question"); + testAddColumnWithCommentSpecialCharacter("[square bracket]"); + } + + protected void testAddColumnWithCommentSpecialCharacter(String comment) { skipTestUnless(hasBehavior(SUPPORTS_ADD_COLUMN_WITH_COMMENT)); @@ -5602,8 +5659,21 @@ public void testAddColumnWithCommentSpecialCharacter(String comment) } } - @Test(dataProvider = "testCommentDataProvider") - public void testCommentTableSpecialCharacter(String comment) + @Test + public void testCommentTableSpecialCharacter() + { + testCommentTableSpecialCharacter("a;semicolon"); + testCommentTableSpecialCharacter("an@at"); + testCommentTableSpecialCharacter("a\"quote"); + testCommentTableSpecialCharacter("an'apostrophe"); + testCommentTableSpecialCharacter("a`backtick`"); + testCommentTableSpecialCharacter("a/slash"); + testCommentTableSpecialCharacter("a\\backslash"); + testCommentTableSpecialCharacter("a?question"); + testCommentTableSpecialCharacter("[square bracket]"); + } + + private void testCommentTableSpecialCharacter(String comment) { skipTestUnless(hasBehavior(SUPPORTS_COMMENT_ON_TABLE)); @@ -5613,8 +5683,21 @@ public void 
testCommentTableSpecialCharacter(String comment) } } - @Test(dataProvider = "testCommentDataProvider") - public void testCommentColumnSpecialCharacter(String comment) + @Test + public void testCommentColumnSpecialCharacter() + { + testCommentColumnSpecialCharacter("a;semicolon"); + testCommentColumnSpecialCharacter("an@at"); + testCommentColumnSpecialCharacter("a\"quote"); + testCommentColumnSpecialCharacter("an'apostrophe"); + testCommentColumnSpecialCharacter("a`backtick`"); + testCommentColumnSpecialCharacter("a/slash"); + testCommentColumnSpecialCharacter("a\\backslash"); + testCommentColumnSpecialCharacter("a?question"); + testCommentColumnSpecialCharacter("[square bracket]"); + } + + private void testCommentColumnSpecialCharacter(String comment) { skipTestUnless(hasBehavior(SUPPORTS_COMMENT_ON_COLUMN)); @@ -5624,38 +5707,24 @@ public void testCommentColumnSpecialCharacter(String comment) } } - @DataProvider - public Object[][] testCommentDataProvider() - { - return new Object[][] { - {"a;semicolon"}, - {"an@at"}, - {"a\"quote"}, - {"an'apostrophe"}, - {"a`backtick`"}, - {"a/slash"}, - {"a\\backslash"}, - {"a?question"}, - {"[square bracket]"}, - }; - } - protected static String varcharLiteral(String value) { requireNonNull(value, "value is null"); return "'" + value.replace("'", "''") + "'"; } - @Test(dataProvider = "testDataMappingSmokeTestDataProvider") - public void testDataMappingSmokeTest(DataMappingTestSetup dataMappingTestSetup) + @Test + public void testDataMappingSmokeTest() { - testDataMapping(dataMappingTestSetup); + skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE)); + + for (DataMappingTestSetup dataMappingTestSetup : testDataMappingSmokeTestDataProvider()) { + testDataMapping(dataMappingTestSetup); + } } private void testDataMapping(DataMappingTestSetup dataMappingTestSetup) { - skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE)); - String trinoTypeName = dataMappingTestSetup.getTrinoTypeName(); String sampleValueLiteral = 
dataMappingTestSetup.getSampleValueLiteral(); String highValueLiteral = dataMappingTestSetup.getHighValueLiteral(); @@ -5707,13 +5776,12 @@ private void testDataMapping(DataMappingTestSetup dataMappingTestSetup) assertUpdate("DROP TABLE " + tableName); } - @DataProvider - public final Object[][] testDataMappingSmokeTestDataProvider() + public final List testDataMappingSmokeTestDataProvider() { return testDataMappingSmokeTestData().stream() .map(this::filterDataMappingSmokeTestData) .flatMap(Optional::stream) - .collect(toDataProvider()); + .collect(toList()); } private List testDataMappingSmokeTestData() @@ -5756,19 +5824,22 @@ protected Optional filterDataMappingSmokeTestData(DataMapp return Optional.of(dataMappingTestSetup); } - @Test(dataProvider = "testCaseSensitiveDataMappingProvider") - public void testCaseSensitiveDataMapping(DataMappingTestSetup dataMappingTestSetup) + @Test + public void testCaseSensitiveDataMapping() { - testDataMapping(dataMappingTestSetup); + skipTestUnless(hasBehavior(SUPPORTS_CREATE_TABLE)); + + for (DataMappingTestSetup dataMappingTestSetup : testCaseSensitiveDataMappingProvider()) { + testDataMapping(dataMappingTestSetup); + } } - @DataProvider - public final Object[][] testCaseSensitiveDataMappingProvider() + private List testCaseSensitiveDataMappingProvider() { return testCaseSensitiveDataMappingData().stream() .map(this::filterCaseSensitiveDataMappingTestData) .flatMap(Optional::stream) - .collect(toDataProvider()); + .collect(toList()); } protected Optional filterCaseSensitiveDataMappingTestData(DataMappingTestSetup dataMappingTestSetup) @@ -6444,15 +6515,14 @@ private void verifyUnsupportedTypeException(Throwable exception, String trinoTyp .satisfies(e -> assertThat(getTrinoExceptionCause(e)).hasMessageFindingMatch(expectedMessagePart)); } - @Test(dataProvider = "testColumnNameDataProvider") - public void testMaterializedViewColumnName(String columnName) + @Test + public void testMaterializedViewColumnName() { 
skipTestUnless(hasBehavior(SUPPORTS_CREATE_MATERIALIZED_VIEW)); - if (!requiresDelimiting(columnName)) { - testMaterializedViewColumnName(columnName, false); + for (String columnName : testColumnNameDataProvider()) { + testMaterializedViewColumnName(columnName, requiresDelimiting(columnName)); } - testMaterializedViewColumnName(columnName, true); } private void testMaterializedViewColumnName(String columnName, boolean delimited) @@ -6816,7 +6886,7 @@ public void testProjectionPushdownPhysicalInputSize() protected static void skipTestUnless(boolean requirement) { if (!requirement) { - throw new SkipException("requirement not met"); + abort("requirement not met"); } } diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java index e1a083b7c8ec..200f0ddf307f 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java @@ -50,7 +50,6 @@ import io.trino.testing.DistributedQueryRunner; import io.trino.testing.QueryRunner; import org.intellij.lang.annotations.Language; -import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.File; @@ -1190,8 +1189,19 @@ public void testConnectorMetrics() assertThat(connectorMetrics).containsExactly(TEST_METRICS); } - @Test(dataProvider = "setOperator") - public void testOutputColumnsForSetOperations(String setOperator) + @Test + public void testOutputColumnsForSetOperations() + throws Exception + { + testOutputColumnsForSetOperations("UNION"); + testOutputColumnsForSetOperations("UNION ALL"); + testOutputColumnsForSetOperations("INTERSECT"); + testOutputColumnsForSetOperations("INTERSECT ALL"); + testOutputColumnsForSetOperations("EXCEPT"); + testOutputColumnsForSetOperations("EXCEPT ALL"); + } + + private void testOutputColumnsForSetOperations(String setOperator) 
throws Exception { assertLineage( @@ -1211,18 +1221,6 @@ public void testOutputColumnsForSetOperations(String setOperator) new ColumnDetail("tpch", "sf1", "orders", "custkey")))); } - @DataProvider - public Object[][] setOperator() - { - return new Object[][]{ - {"UNION"}, - {"UNION ALL"}, - {"INTERSECT"}, - {"INTERSECT ALL"}, - {"EXCEPT"}, - {"EXCEPT ALL"}}; - } - @Test public void testAnonymizedJsonPlan() throws Exception diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestRevokeOnTable.java b/testing/trino-tests/src/test/java/io/trino/execution/TestRevokeOnTable.java index 404c0867aa6c..50b1324004f8 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestRevokeOnTable.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestRevokeOnTable.java @@ -29,7 +29,6 @@ import io.trino.testing.DistributedQueryRunner; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.util.EnumSet; @@ -87,8 +86,18 @@ public void teardown() queryRunner = null; // closed by assertions.close } - @Test(dataProvider = "privilegesAndUsers") - public void testRevokeOnSchema(String privilege, Session user) + @Test + public void testRevokeOnSchema() + { + testRevokeOnSchema("CREATE", userWithCreate); + testRevokeOnSchema("SELECT", userWithSelect); + testRevokeOnSchema("INSERT", userWithInsert); + testRevokeOnSchema("UPDATE", userWithUpdate); + testRevokeOnSchema("DELETE", userWithDelete); + testRevokeOnSchema("ALL PRIVILEGES", userWithAllPrivileges); + } + + private void testRevokeOnSchema(String privilege, Session user) { assertThat(assertions.query(user, "SHOW TABLES FROM default")).matches("VALUES (VARCHAR 'table_one')"); @@ -97,60 +106,77 @@ public void testRevokeOnSchema(String privilege, Session user) assertThat(assertions.query(user, "SHOW TABLES FROM default")).returnsEmptyResult(); } - @Test(dataProvider = 
"privilegesAndUsers") - public void testRevokeOnNonExistingCatalog(String privilege, Session user) + @Test + public void testRevokeOnNonExistingCatalog() + { + testRevokeOnNonExistingCatalog("CREATE", userWithCreate); + testRevokeOnNonExistingCatalog("SELECT", userWithSelect); + testRevokeOnNonExistingCatalog("INSERT", userWithInsert); + testRevokeOnNonExistingCatalog("UPDATE", userWithUpdate); + testRevokeOnNonExistingCatalog("DELETE", userWithDelete); + testRevokeOnNonExistingCatalog("ALL PRIVILEGES", userWithAllPrivileges); + } + + private void testRevokeOnNonExistingCatalog(String privilege, Session user) { assertThatThrownBy(() -> queryRunner.execute(admin, format("REVOKE %s ON TABLE missing_catalog.missing_schema.missing_table FROM %s", privilege, user.getUser()))) .hasMessageContaining("Table 'missing_catalog.missing_schema.missing_table' does not exist"); } - @Test(dataProvider = "privilegesAndUsers") - public void testRevokeOnNonExistingSchema(String privilege, Session user) + @Test + public void testRevokeOnNonExistingSchema() { - assertThatThrownBy(() -> queryRunner.execute(admin, format("REVOKE %s ON TABLE missing_schema.missing_table FROM %s", privilege, user.getUser()))) - .hasMessageContaining("Table 'local.missing_schema.missing_table' does not exist"); + testRevokeOnNonExistingSchema("CREATE", userWithCreate); + testRevokeOnNonExistingSchema("SELECT", userWithSelect); + testRevokeOnNonExistingSchema("INSERT", userWithInsert); + testRevokeOnNonExistingSchema("UPDATE", userWithUpdate); + testRevokeOnNonExistingSchema("DELETE", userWithDelete); + testRevokeOnNonExistingSchema("ALL PRIVILEGES", userWithAllPrivileges); } - @Test(dataProvider = "privilegesAndUsers") - public void testRevokeOnNonExistingTable(String privilege, Session user) + private void testRevokeOnNonExistingSchema(String privilege, Session user) { - assertThatThrownBy(() -> queryRunner.execute(admin, format("REVOKE %s ON TABLE default.missing_table FROM %s", privilege, 
user.getUser()))) - .hasMessageContaining("Table 'local.default.missing_table' does not exist"); + assertThatThrownBy(() -> queryRunner.execute(admin, format("REVOKE %s ON TABLE missing_schema.missing_table FROM %s", privilege, user.getUser()))) + .hasMessageContaining("Table 'local.missing_schema.missing_table' does not exist"); } - @Test(dataProvider = "privileges") - public void testAccessDenied(String privilege) + @Test + public void testRevokeOnNonExistingTable() { - assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE %s ON TABLE table_one FROM %s", privilege, randomUsername()))) - .hasMessageContaining( - "Access Denied: Cannot revoke privilege %s on table default.table_one", - privilege.equals("ALL PRIVILEGES") ? "CREATE" : privilege); + testRevokeOnNonExistingTable("CREATE", userWithCreate); + testRevokeOnNonExistingTable("SELECT", userWithSelect); + testRevokeOnNonExistingTable("INSERT", userWithInsert); + testRevokeOnNonExistingTable("UPDATE", userWithUpdate); + testRevokeOnNonExistingTable("DELETE", userWithDelete); + testRevokeOnNonExistingTable("ALL PRIVILEGES", userWithAllPrivileges); } - @DataProvider(name = "privilegesAndUsers") - public static Object[][] privilegesAndUsers() + private void testRevokeOnNonExistingTable(String privilege, Session user) { - return new Object[][] { - {"CREATE", userWithCreate}, - {"SELECT", userWithSelect}, - {"INSERT", userWithInsert}, - {"UPDATE", userWithUpdate}, - {"DELETE", userWithDelete}, - {"ALL PRIVILEGES", userWithAllPrivileges} - }; + assertThatThrownBy(() -> queryRunner.execute(admin, format("REVOKE %s ON TABLE default.missing_table FROM %s", privilege, user.getUser()))) + .hasMessageContaining("Table 'local.default.missing_table' does not exist"); } - @DataProvider(name = "privileges") - public static Object[][] privileges() + @Test + public void testAccessDenied() { - return new Object[][] { - {"CREATE"}, - {"SELECT"}, - {"INSERT"}, - {"UPDATE"}, - {"DELETE"}, - {"ALL 
PRIVILEGES"} - }; + assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE CREATE ON TABLE table_one FROM %s", randomUsername()))) + .hasMessageContaining("Access Denied: Cannot revoke privilege CREATE on table default.table_one"); + + assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE SELECT ON TABLE table_one FROM %s", randomUsername()))) + .hasMessageContaining("Access Denied: Cannot revoke privilege SELECT on table default.table_one"); + + assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE INSERT ON TABLE table_one FROM %s", randomUsername()))) + .hasMessageContaining("Access Denied: Cannot revoke privilege INSERT on table default.table_one"); + + assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE UPDATE ON TABLE table_one FROM %s", randomUsername()))) + .hasMessageContaining("Access Denied: Cannot revoke privilege UPDATE on table default.table_one"); + + assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE DELETE ON TABLE table_one FROM %s", randomUsername()))) + .hasMessageContaining("Access Denied: Cannot revoke privilege DELETE on table default.table_one"); + + assertThatThrownBy(() -> queryRunner.execute(sessionOf(randomUsername()), format("REVOKE ALL PRIVILEGES ON TABLE table_one FROM %s", randomUsername()))) + .hasMessageContaining("Access Denied: Cannot revoke privilege CREATE on table default.table_one"); } private static Session sessionOf(String username) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/BaseCostBasedPlanTest.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/BaseCostBasedPlanTest.java index 4a85713efb7d..cbbb425cfc3e 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/BaseCostBasedPlanTest.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/BaseCostBasedPlanTest.java @@ -34,7 +34,6 @@ import 
io.trino.sql.planner.plan.ValuesNode; import io.trino.testing.LocalQueryRunner; import org.testng.annotations.BeforeClass; -import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.IOException; @@ -44,7 +43,6 @@ import java.util.List; import java.util.Optional; import java.util.stream.IntStream; -import java.util.stream.Stream; import static com.google.common.base.Preconditions.checkState; import static com.google.common.base.Verify.verify; @@ -61,7 +59,6 @@ import static io.trino.sql.planner.LogicalPlanner.Stage.OPTIMIZED_AND_VALIDATED; import static io.trino.sql.planner.plan.JoinNode.DistributionType.REPLICATED; import static io.trino.sql.planner.plan.JoinNode.Type.INNER; -import static io.trino.testing.DataProviders.toDataProvider; import static io.trino.testing.TestingSession.testSessionBuilder; import static java.lang.String.format; import static java.nio.charset.StandardCharsets.UTF_8; @@ -133,19 +130,14 @@ protected LocalQueryRunner createLocalQueryRunner() public abstract void prepareTables() throws Exception; - protected abstract Stream getQueryResourcePaths(); + protected abstract List getQueryResourcePaths(); - @DataProvider - public Object[][] getQueriesDataProvider() + @Test + public void test() { - return getQueryResourcePaths() - .collect(toDataProvider()); - } - - @Test(dataProvider = "getQueriesDataProvider") - public void test(String queryResourcePath) - { - assertEquals(generateQueryPlan(readQuery(queryResourcePath)), read(getQueryPlanResourcePath(queryResourcePath))); + for (String queryResourcePath : getQueryResourcePaths()) { + assertEquals(generateQueryPlan(readQuery(queryResourcePath)), read(getQueryPlanResourcePath(queryResourcePath))); + } } private String getQueryPlanResourcePath(String queryResourcePath) @@ -167,7 +159,7 @@ protected void generate() initPlanTest(); try { prepareTables(); - getQueryResourcePaths() + getQueryResourcePaths().stream() .parallel() .forEach(queryResourcePath -> { try { 
diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpcdsCostBasedPlan.java index 56013dd1f471..3d8c8b17131a 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpcdsCostBasedPlan.java @@ -13,7 +13,7 @@ */ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -37,9 +37,9 @@ public TestHivePartitionedTpcdsCostBasedPlan() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpchCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpchCostBasedPlan.java index 2ab22158c19e..71bdba29f709 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpchCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHivePartitionedTpchCostBasedPlan.java @@ -13,7 +13,7 @@ */ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-H queries. 
@@ -37,9 +37,9 @@ public TestHivePartitionedTpchCostBasedPlan() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCH_SQL_FILES.stream(); + return TPCH_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpcdsCostBasedPlan.java index b9e9192915b8..1ac2e7b7d40c 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpcdsCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -38,9 +38,9 @@ public TestHiveTpcdsCostBasedPlan() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpchCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpchCostBasedPlan.java index dd34e104b47f..a9c152ecbedb 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpchCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestHiveTpchCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-H queries. 
@@ -38,9 +38,9 @@ public TestHiveTpchCostBasedPlan() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCH_SQL_FILES.stream(); + return TPCH_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpcdsCostBasedPlan.java index d6810ea240e7..f2b90868ae6c 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpcdsCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -43,9 +43,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpchCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpchCostBasedPlan.java index d6861e9563cc..2375352bab2d 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpchCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcPartitionedTpchCostBasedPlan.java @@ -16,7 +16,7 @@ import io.trino.tpch.TpchTable; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-H queries. 
@@ -42,9 +42,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCH_SQL_FILES.stream(); + return TPCH_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpcdsCostBasedPlan.java index 91667b0f105c..a4fc1847d0db 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpcdsCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -43,9 +43,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpchCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpchCostBasedPlan.java index 4cefe3289df5..bee3ce48b1f9 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpchCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergOrcTpchCostBasedPlan.java @@ -16,7 +16,7 @@ import io.trino.tpch.TpchTable; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-H queries. 
@@ -42,9 +42,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCH_SQL_FILES.stream(); + return TPCH_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpcdsCostBasedPlan.java index f8aac2df7945..3b5d48f5f623 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpcdsCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -43,9 +43,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpchCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpchCostBasedPlan.java index 8b05fddf57cf..994571d11ea6 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpchCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetPartitionedTpchCostBasedPlan.java @@ -16,7 +16,7 @@ import io.trino.tpch.TpchTable; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-H queries. 
@@ -42,9 +42,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCH_SQL_FILES.stream(); + return TPCH_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpcdsCostBasedPlan.java index 10836dbfb887..d64295fc7e47 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpcdsCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -43,9 +43,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpchCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpchCostBasedPlan.java index d7258641717c..a8de1f470d7e 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpchCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergParquetTpchCostBasedPlan.java @@ -16,7 +16,7 @@ import io.trino.tpch.TpchTable; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-H queries. 
@@ -42,9 +42,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCH_SQL_FILES.stream(); + return TPCH_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergSmallFilesParquetTpcdsCostBasedPlan.java b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergSmallFilesParquetTpcdsCostBasedPlan.java index 233924d144a0..3e9633fff937 100644 --- a/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergSmallFilesParquetTpcdsCostBasedPlan.java +++ b/testing/trino-tests/src/test/java/io/trino/sql/planner/TestIcebergSmallFilesParquetTpcdsCostBasedPlan.java @@ -14,7 +14,7 @@ package io.trino.sql.planner; -import java.util.stream.Stream; +import java.util.List; /** * This class tests cost-based optimization rules related to joins. It contains unmodified TPC-DS queries. @@ -43,9 +43,9 @@ protected void doPrepareTables() } @Override - protected Stream getQueryResourcePaths() + protected List getQueryResourcePaths() { - return TPCDS_SQL_FILES.stream(); + return TPCDS_SQL_FILES; } public static void main(String[] args) diff --git a/testing/trino-tests/src/test/java/io/trino/tests/tpch/TestTpchConnectorTest.java b/testing/trino-tests/src/test/java/io/trino/tests/tpch/TestTpchConnectorTest.java index 0fac01156129..c843f28cb96f 100644 --- a/testing/trino-tests/src/test/java/io/trino/tests/tpch/TestTpchConnectorTest.java +++ b/testing/trino-tests/src/test/java/io/trino/tests/tpch/TestTpchConnectorTest.java @@ -28,7 +28,7 @@ import io.trino.testing.QueryRunner; import io.trino.testing.TestingConnectorBehavior; import io.trino.type.TypeDeserializer; -import org.testng.annotations.Test; +import org.junit.jupiter.api.Test; import java.util.Optional; @@ -162,6 +162,7 @@ public void testShowTables() assertQueryFails("SHOW TABLES FROM sf0", "line 1:1: Schema 'sf0' does not exist"); } + @Test @Override 
public void testShowCreateTable() { @@ -179,6 +180,7 @@ public void testShowCreateTable() ")"); } + @Test @Override public void testPredicateReflectedInExplain() {