diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c029e316dd05..f367138e106e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -313,6 +313,7 @@ jobs: !:trino-raptor-legacy, !:trino-accumulo, !:trino-cassandra, + !:trino-scylla, !:trino-clickhouse, !:trino-delta-lake, !:trino-hive, @@ -386,6 +387,7 @@ jobs: - { modules: plugin/trino-raptor-legacy } - { modules: plugin/trino-accumulo } - { modules: plugin/trino-cassandra } + - { modules: plugin/trino-scylla } - { modules: plugin/trino-clickhouse } - { modules: plugin/trino-delta-lake } - { modules: plugin/trino-delta-lake, profile: test-failure-recovery } diff --git a/core/trino-server/src/main/provisio/trino.xml b/core/trino-server/src/main/provisio/trino.xml index f7f8c165bc02..935217b42105 100644 --- a/core/trino-server/src/main/provisio/trino.xml +++ b/core/trino-server/src/main/provisio/trino.xml @@ -288,4 +288,10 @@ + +    <artifactSet to="plugin/scylla"> +        <artifact id="${project.groupId}:trino-scylla:zip:${project.version}"> +            <unpack /> +        </artifact> +    </artifactSet> diff --git a/docs/src/main/sphinx/connector.rst b/docs/src/main/sphinx/connector.rst index 54cca76b3874..79d3b3804e34 100644 --- a/docs/src/main/sphinx/connector.rst +++ b/docs/src/main/sphinx/connector.rst @@ -36,6 +36,7 @@ from different data sources. Prometheus Redis Redshift + Scylla SingleStore (MemSQL) SQL Server System diff --git a/docs/src/main/sphinx/connector/scylla.rst b/docs/src/main/sphinx/connector/scylla.rst new file mode 100644 index 000000000000..6c36597bd5b9 --- /dev/null +++ b/docs/src/main/sphinx/connector/scylla.rst @@ -0,0 +1,38 @@ +================ +Scylla connector +================ + +The Scylla connector allows querying data stored in +`Scylla <https://www.scylladb.com/>`_. + +Requirements +------------ + +To connect to Scylla, you need: + +* Scylla version 3.0.0 or higher. +* Network access from the Trino coordinator and workers to Scylla. + Port 9042 is the default port. 
+ +Configuration +------------- + +To configure the Scylla connector, create a catalog properties file +``etc/catalog/scylla.properties`` with the following contents, +replacing ``host1,host2`` with a comma-separated list of the Scylla +nodes, used to discover the cluster topology: + +.. code-block:: text + +    connector.name=scylla +    cassandra.contact-points=host1,host2 + +You also need to set ``cassandra.native-protocol-port``, if your +Scylla nodes are not using the default port 9042. + +Compatibility with Cassandra connector +-------------------------------------- + +The Scylla connector is very similar to the Cassandra connector with the +only difference being the underlying driver. +See :doc:`Cassandra connector </connector/cassandra>` for more details. diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/BaseCassandraConnectorTest.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/BaseCassandraConnectorTest.java new file mode 100644 index 000000000000..2fcb7a467a4a --- /dev/null +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/BaseCassandraConnectorTest.java @@ -0,0 +1,1440 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.cassandra; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.primitives.Ints; +import io.airlift.units.Duration; +import io.trino.Session; +import io.trino.spi.type.Type; +import io.trino.testing.BaseConnectorTest; +import io.trino.testing.Bytes; +import io.trino.testing.MaterializedResult; +import io.trino.testing.MaterializedRow; +import io.trino.testing.QueryRunner; +import io.trino.testing.TestingConnectorBehavior; +import io.trino.testing.assertions.Assert; +import io.trino.testing.sql.TestTable; +import org.testng.SkipException; +import org.testng.annotations.Test; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.sql.Timestamp; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.List; +import java.util.Optional; + +import static com.datastax.oss.driver.api.core.data.ByteUtils.toHexString; +import static com.google.common.io.BaseEncoding.base16; +import static io.trino.plugin.cassandra.CassandraQueryRunner.createCassandraQueryRunner; +import static io.trino.plugin.cassandra.CassandraQueryRunner.createCassandraSession; +import static io.trino.plugin.cassandra.TestCassandraTable.clusterColumn; +import static io.trino.plugin.cassandra.TestCassandraTable.columnsValue; +import static io.trino.plugin.cassandra.TestCassandraTable.generalColumn; +import static io.trino.plugin.cassandra.TestCassandraTable.partitionColumn; +import static io.trino.spi.type.BigintType.BIGINT; +import static io.trino.spi.type.BooleanType.BOOLEAN; +import static io.trino.spi.type.DoubleType.DOUBLE; +import static io.trino.spi.type.IntegerType.INTEGER; +import static io.trino.spi.type.RealType.REAL; +import static io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_WITH_TIME_ZONE; +import static io.trino.spi.type.UuidType.UUID; +import static 
io.trino.spi.type.VarbinaryType.VARBINARY; +import static io.trino.spi.type.VarcharType.VARCHAR; +import static io.trino.spi.type.VarcharType.createUnboundedVarcharType; +import static io.trino.spi.type.VarcharType.createVarcharType; +import static io.trino.testing.MaterializedResult.DEFAULT_PRECISION; +import static io.trino.testing.MaterializedResult.resultBuilder; +import static io.trino.testing.QueryAssertions.assertContains; +import static io.trino.testing.QueryAssertions.assertContainsEventually; +import static java.lang.String.format; +import static java.util.Comparator.comparing; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.stream.Collectors.toList; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.testng.Assert.assertEquals; + +public abstract class BaseCassandraConnectorTest + extends BaseConnectorTest +{ + protected static final String KEYSPACE = "smoke_test"; + protected static final Session SESSION = createCassandraSession(KEYSPACE); + + protected static final ZonedDateTime TIMESTAMP_VALUE = ZonedDateTime.of(1970, 1, 1, 3, 4, 5, 0, ZoneId.of("UTC")); + + protected CassandraServer server; + protected CassandraSession session; + + @Override + protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) + { + switch (connectorBehavior) { + case SUPPORTS_TRUNCATE: + return true; + + case SUPPORTS_CREATE_SCHEMA: + return false; + + case SUPPORTS_CREATE_VIEW: + return false; + + case SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT: + case SUPPORTS_CREATE_TABLE_WITH_COLUMN_COMMENT: + return false; + + case SUPPORTS_RENAME_TABLE: + return false; + + case SUPPORTS_ARRAY: + case SUPPORTS_ROW_TYPE: + return false; + + case SUPPORTS_ADD_COLUMN: + case SUPPORTS_DROP_COLUMN: + case SUPPORTS_RENAME_COLUMN: + return false; + + case SUPPORTS_COMMENT_ON_TABLE: + case SUPPORTS_COMMENT_ON_COLUMN: + return false; + + case 
SUPPORTS_TOPN_PUSHDOWN: + return false; + + case SUPPORTS_NOT_NULL_CONSTRAINT: + return false; + + case SUPPORTS_DELETE: + return true; + + default: + return super.hasBehavior(connectorBehavior); + } + } + + @Override + protected QueryRunner createQueryRunner() + throws Exception + { + server = closeAfterClass(new TestingCassandraServer()); + session = server.getSession(); + session.execute("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + return createCassandraQueryRunner(server, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); + } + + @Override + protected TestTable createTableWithDefaultColumns() + { + throw new SkipException("Cassandra connector does not support column default values"); + } + + @Override + protected Optional filterDataMappingSmokeTestData(DataMappingTestSetup dataMappingTestSetup) + { + String typeName = dataMappingTestSetup.getTrinoTypeName(); + if (typeName.equals("time") + || typeName.equals("timestamp") + || typeName.equals("decimal(5,3)") + || typeName.equals("decimal(15,3)") + || typeName.equals("char(3)")) { + // TODO this should either work or fail cleanly + return Optional.empty(); + } + return Optional.of(dataMappingTestSetup); + } + + @Override + protected Optional filterCaseSensitiveDataMappingTestData(DataMappingTestSetup dataMappingTestSetup) + { + String typeName = dataMappingTestSetup.getTrinoTypeName(); + if (typeName.equals("char(1)")) { + // TODO this should either work or fail cleanly + return Optional.empty(); + } + return Optional.of(dataMappingTestSetup); + } + + @Override + protected String dataMappingTableName(String trinoTypeName) + { + return "tmp_trino_" + System.nanoTime(); + } + + @Test + @Override + public void testShowColumns() + { + MaterializedResult actual = computeActual("SHOW COLUMNS FROM orders"); + + MaterializedResult expectedParametrizedVarchar = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR) + 
.row("orderkey", "bigint", "", "") + .row("custkey", "bigint", "", "") + .row("orderstatus", "varchar", "", "") + .row("totalprice", "double", "", "") + .row("orderdate", "date", "", "") + .row("orderpriority", "varchar", "", "") + .row("clerk", "varchar", "", "") + .row("shippriority", "integer", "", "") + .row("comment", "varchar", "", "") + .build(); + + Assert.assertEquals(actual, expectedParametrizedVarchar); + } + + @Test + @Override + public void testDescribeTable() + { + MaterializedResult expectedColumns = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR) + .row("orderkey", "bigint", "", "") + .row("custkey", "bigint", "", "") + .row("orderstatus", "varchar", "", "") + .row("totalprice", "double", "", "") + .row("orderdate", "date", "", "") + .row("orderpriority", "varchar", "", "") + .row("clerk", "varchar", "", "") + .row("shippriority", "integer", "", "") + .row("comment", "varchar", "", "") + .build(); + MaterializedResult actualColumns = computeActual("DESCRIBE orders"); + Assert.assertEquals(actualColumns, expectedColumns); + } + + @Test + @Override + public void testShowCreateTable() + { + assertThat(computeActual("SHOW CREATE TABLE orders").getOnlyValue()) + .isEqualTo("CREATE TABLE " + getSession().getCatalog().orElseThrow() + ".tpch.orders (\n" + + " orderkey bigint,\n" + + " custkey bigint,\n" + + " orderstatus varchar,\n" + + " totalprice double,\n" + + " orderdate date,\n" + + " orderpriority varchar,\n" + + " clerk varchar,\n" + + " shippriority integer,\n" + + " comment varchar\n" + + ")"); + } + + @Override + public void testCharVarcharComparison() + { + assertThatThrownBy(super::testCharVarcharComparison) + .hasMessage("Unsupported type: char(3)"); + } + + @Test + public void testPushdownUuidPartitionKeyPredicate() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_pushdown_uuid_partition_key", + ImmutableList.of(partitionColumn("col_uuid", "uuid"), generalColumn("col_text", "text")), + 
ImmutableList.of("00000000-0000-0000-0000-000000000001, 'Trino'"))) { + assertThat(query(format("SELECT col_text FROM %s WHERE col_uuid = UUID '00000000-0000-0000-0000-000000000001'", testCassandraTable.getTableName()))) + .matches("VALUES CAST('Trino' AS varchar)"); + } + } + + @Test + public void testPushdownAllTypesPartitionKeyPredicate() + { + // TODO partition key predicate pushdown for decimal types does not work https://github.com/trinodb/trino/issues/10927 + try (TestCassandraTable testCassandraTable = testTable( + "table_pushdown_all_types_partition_key", + ImmutableList.of( + partitionColumn("key", "text"), + partitionColumn("typeuuid", "uuid"), + partitionColumn("typetinyint", "tinyint"), + partitionColumn("typesmallint", "smallint"), + partitionColumn("typeinteger", "int"), + partitionColumn("typelong", "bigint"), + generalColumn("typebytes", "blob"), + partitionColumn("typedate", "date"), + partitionColumn("typetimestamp", "timestamp"), + partitionColumn("typeansi", "ascii"), + partitionColumn("typeboolean", "boolean"), + generalColumn("typedecimal", "decimal"), + partitionColumn("typedouble", "double"), + partitionColumn("typefloat", "float"), + partitionColumn("typeinet", "inet"), + partitionColumn("typevarchar", "varchar"), + generalColumn("typevarint", "varint"), + partitionColumn("typetimeuuid", "timeuuid"), + generalColumn("typelist", "frozen >"), + generalColumn("typemap", "frozen >"), + generalColumn("typeset", "frozen >")), + ImmutableList.of("" + + "'key 7', " + + "00000000-0000-0000-0000-000000000007, " + + "7, " + + "7, " + + "7, " + + "1007, " + + "0x00000007, " + + "'1970-01-01', " + + "'1970-01-01 03:04:05.000+0000', " + + "'ansi 7', " + + "false, " + + "128.0, " + + "16384.0, " + + "2097152.0, " + + "'127.0.0.1', " + + "'varchar 7', " + + "10000000, " + + "d2177dd0-eaa2-11de-a572-001b779c76e7, " + + "['list-value-17', 'list-value-27'], " + + "{7:8, 9:10}, " + + "{false, true}"))) { + String sql = "SELECT *" + + " FROM " + 
testCassandraTable.getTableName() + + " WHERE key = 'key 7'" + + " AND typeuuid = UUID '00000000-0000-0000-0000-000000000007'" + + " AND typetinyint = 7" + + " AND typesmallint = 7" + + " AND typeinteger = 7" + + " AND typelong = 1007" + + " AND typedate = DATE '1970-01-01'" + + " AND typetimestamp = TIMESTAMP '1970-01-01 03:04:05Z'" + + " AND typeansi = 'ansi 7'" + + " AND typeboolean = false" + + " AND typedouble = 16384.0" + + " AND typefloat = REAL '2097152.0'" + + " AND typeinet = '127.0.0.1'" + + " AND typevarchar = 'varchar 7'" + + " AND typetimeuuid = UUID 'd2177dd0-eaa2-11de-a572-001b779c76e7'" + + ""; + MaterializedResult result = execute(sql); + + assertEquals(result.getRowCount(), 1); + } + } + + @Test + public void testPartitionPushdownsWithNotMatchingPredicate() + { + try (TestCassandraTable testCassandraTable = testTable( + "partition_not_pushed_down_keys", + ImmutableList.of(partitionColumn("id", "varchar"), generalColumn("trino_filter_col", "int")), + ImmutableList.of("'2', 0"))) { + String sql = "SELECT 1 FROM " + testCassandraTable.getTableName() + " WHERE id = '1' AND trino_filter_col = 0"; + + assertThat(execute(sql).getMaterializedRows().size()).isEqualTo(0); + } + } + + @Test + public void testPartitionKeyPredicate() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_all_types_partition_key", + ImmutableList.of( + partitionColumn("key", "text"), + partitionColumn("typeuuid", "uuid"), + partitionColumn("typetinyint", "tinyint"), + partitionColumn("typesmallint", "smallint"), + partitionColumn("typeinteger", "int"), + partitionColumn("typelong", "bigint"), + partitionColumn("typebytes", "blob"), + partitionColumn("typedate", "date"), + partitionColumn("typetimestamp", "timestamp"), + partitionColumn("typeansi", "ascii"), + partitionColumn("typeboolean", "boolean"), + partitionColumn("typedecimal", "decimal"), + partitionColumn("typedouble", "double"), + partitionColumn("typefloat", "float"), + partitionColumn("typeinet", 
"inet"), + partitionColumn("typevarchar", "varchar"), + partitionColumn("typevarint", "varint"), + partitionColumn("typetimeuuid", "timeuuid"), + partitionColumn("typelist", "frozen >"), + partitionColumn("typemap", "frozen >"), + partitionColumn("typeset", "frozen >")), + ImmutableList.of("" + + "'key 7', " + + "00000000-0000-0000-0000-000000000007, " + + "7, " + + "7, " + + "7, " + + "1007, " + + "0x00000007, " + + "'1970-01-01', " + + "'1970-01-01 03:04:05.000+0000', " + + "'ansi 7', " + + "false, " + + "128.0, " + + "16384.0, " + + "2097152.0, " + + "'127.0.0.1', " + + "'varchar 7', " + + "10000000, " + + "d2177dd0-eaa2-11de-a572-001b779c76e7, " + + "['list-value-17', 'list-value-27'], " + + "{7:8, 9:10}, " + + "{false, true}"))) { + String sql = "SELECT *" + + " FROM " + testCassandraTable.getTableName() + + " WHERE key = 'key 7'" + + " AND typeuuid = UUID '00000000-0000-0000-0000-000000000007'" + + " AND typetinyint = 7" + + " AND typesmallint = 7" + + " AND typeinteger = 7" + + " AND typelong = 1007" + + " AND typebytes = from_hex('" + base16().encode(Ints.toByteArray(7)) + "')" + + " AND typedate = DATE '1970-01-01'" + + " AND typetimestamp = TIMESTAMP '1970-01-01 03:04:05Z'" + + " AND typeansi = 'ansi 7'" + + " AND typeboolean = false" + + " AND typedecimal = 128.0" + + " AND typedouble = 16384.0" + + " AND typefloat = REAL '2097152.0'" + + " AND typeinet = '127.0.0.1'" + + " AND typevarchar = 'varchar 7'" + + " AND typevarint = '10000000'" + + " AND typetimeuuid = UUID 'd2177dd0-eaa2-11de-a572-001b779c76e7'" + + " AND typelist = '[\"list-value-17\",\"list-value-27\"]'" + + " AND typemap = '{7:8,9:10}'" + + " AND typeset = '[false,true]'" + + ""; + MaterializedResult result = execute(sql); + + assertEquals(result.getRowCount(), 1); + } + } + + @Test + public void testTimestampPartitionKey() + { + try (TestCassandraTable testCassandraTable = testTable( + "test_timestamp", + ImmutableList.of(partitionColumn("c1", "timestamp")), + 
ImmutableList.of("'2017-04-01T11:21:59.001+0000'"))) { + String sql = format( + "SELECT * " + + "FROM %s " + + "WHERE c1 = TIMESTAMP '2017-04-01 11:21:59.001 UTC'", testCassandraTable.getTableName()); + MaterializedResult result = execute(sql); + + assertEquals(result.getRowCount(), 1); + } + } + + @Test + public void testSelect() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_all_types", + ImmutableList.of( + partitionColumn("key", "text"), + generalColumn("typeuuid", "uuid"), + generalColumn("typetinyint", "tinyint"), + generalColumn("typesmallint", "smallint"), + generalColumn("typeinteger", "int"), + generalColumn("typelong", "bigint"), + generalColumn("typebytes", "blob"), + generalColumn("typedate", "date"), + generalColumn("typetimestamp", "timestamp"), + generalColumn("typeansi", "ascii"), + generalColumn("typeboolean", "boolean"), + generalColumn("typedecimal", "decimal"), + generalColumn("typedouble", "double"), + generalColumn("typefloat", "float"), + generalColumn("typeinet", "inet"), + generalColumn("typevarchar", "varchar"), + generalColumn("typevarint", "varint"), + generalColumn("typetimeuuid", "timeuuid"), + generalColumn("typelist", "frozen >"), + generalColumn("typemap", "frozen >"), + generalColumn("typeset", "frozen >")), + columnsValue(9, ImmutableList.of( + rowNumber -> format("'key %d'", rowNumber), + rowNumber -> format("00000000-0000-0000-0000-%012d", rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber + 1000), + rowNumber -> toHexString(ByteBuffer.wrap(Ints.toByteArray(rowNumber)).asReadOnlyBuffer()), + rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd").format(TIMESTAMP_VALUE)), + rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSSZ").format(TIMESTAMP_VALUE)), + rowNumber -> format("'ansi %d'", rowNumber), + rowNumber -> 
String.valueOf(rowNumber % 2 == 0), + rowNumber -> new BigDecimal(Math.pow(2, rowNumber)).toString(), + rowNumber -> String.valueOf(Math.pow(4, rowNumber)), + rowNumber -> String.valueOf((float) Math.pow(8, rowNumber)), + rowNumber -> format("'%s'", "127.0.0.1"), + rowNumber -> format("'varchar %d'", rowNumber), + rowNumber -> BigInteger.TEN.pow(rowNumber).toString(), + rowNumber -> format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber), + rowNumber -> format("['list-value-1%d', 'list-value-2%d']", rowNumber, rowNumber), + rowNumber -> format("{%d:%d, %d:%d}", rowNumber, rowNumber + 1, rowNumber + 2, rowNumber + 3), + rowNumber -> format("{false, true}"))))) { + assertSelect(testCassandraTable.getTableName(), false); + } + + try (TestCassandraTable testCassandraTable = testTable( + "table_all_types_partition_key", + ImmutableList.of( + partitionColumn("key", "text"), + partitionColumn("typeuuid", "uuid"), + partitionColumn("typetinyint", "tinyint"), + partitionColumn("typesmallint", "smallint"), + partitionColumn("typeinteger", "int"), + partitionColumn("typelong", "bigint"), + partitionColumn("typebytes", "blob"), + partitionColumn("typedate", "date"), + partitionColumn("typetimestamp", "timestamp"), + partitionColumn("typeansi", "ascii"), + partitionColumn("typeboolean", "boolean"), + partitionColumn("typedecimal", "decimal"), + partitionColumn("typedouble", "double"), + partitionColumn("typefloat", "float"), + partitionColumn("typeinet", "inet"), + partitionColumn("typevarchar", "varchar"), + partitionColumn("typevarint", "varint"), + partitionColumn("typetimeuuid", "timeuuid"), + partitionColumn("typelist", "frozen >"), + partitionColumn("typemap", "frozen >"), + partitionColumn("typeset", "frozen >")), + columnsValue(9, ImmutableList.of( + rowNumber -> format("'key %d'", rowNumber), + rowNumber -> format("00000000-0000-0000-0000-%012d", rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> 
String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber + 1000), + rowNumber -> toHexString(ByteBuffer.wrap(Ints.toByteArray(rowNumber))), + rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd").format(TIMESTAMP_VALUE)), + rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSSZ").format(TIMESTAMP_VALUE)), + rowNumber -> format("'ansi %d'", rowNumber), + rowNumber -> String.valueOf(rowNumber % 2 == 0), + rowNumber -> new BigDecimal(Math.pow(2, rowNumber)).toString(), + rowNumber -> String.valueOf(Math.pow(4, rowNumber)), + rowNumber -> String.valueOf((float) Math.pow(8, rowNumber)), + rowNumber -> format("'%s'", "127.0.0.1"), + rowNumber -> format("'varchar %d'", rowNumber), + rowNumber -> BigInteger.TEN.pow(rowNumber).toString(), + rowNumber -> format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber), + rowNumber -> format("['list-value-1%d', 'list-value-2%d']", rowNumber, rowNumber), + rowNumber -> format("{%d:%d, %d:%d}", rowNumber, rowNumber + 1, rowNumber + 2, rowNumber + 3), + rowNumber -> format("{false, true}"))))) { + assertSelect(testCassandraTable.getTableName(), false); + } + } + + @Test + public void testInsertToTableWithHiddenId() + { + execute("DROP TABLE IF EXISTS test_create_table"); + execute("CREATE TABLE test_create_table (col1 integer)"); + execute("INSERT INTO test_create_table VALUES (12345)"); + assertQuery("SELECT * FROM smoke_test.test_create_table", "VALUES (12345)"); + execute("DROP TABLE test_create_table"); + } + + @Test + public void testCreateTableAs() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_all_types", + ImmutableList.of( + partitionColumn("key", "text"), + generalColumn("typeuuid", "uuid"), + generalColumn("typetinyint", "tinyint"), + generalColumn("typesmallint", "smallint"), + generalColumn("typeinteger", "int"), + generalColumn("typelong", "bigint"), + generalColumn("typebytes", "blob"), + generalColumn("typedate", "date"), + 
generalColumn("typetimestamp", "timestamp"), + generalColumn("typeansi", "ascii"), + generalColumn("typeboolean", "boolean"), + generalColumn("typedecimal", "decimal"), + generalColumn("typedouble", "double"), + generalColumn("typefloat", "float"), + generalColumn("typeinet", "inet"), + generalColumn("typevarchar", "varchar"), + generalColumn("typevarint", "varint"), + generalColumn("typetimeuuid", "timeuuid"), + generalColumn("typelist", "frozen >"), + generalColumn("typemap", "frozen >"), + generalColumn("typeset", "frozen >")), + columnsValue(9, ImmutableList.of( + rowNumber -> format("'key %d'", rowNumber), + rowNumber -> format("00000000-0000-0000-0000-%012d", rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber), + rowNumber -> String.valueOf(rowNumber + 1000), + rowNumber -> toHexString(ByteBuffer.wrap(Ints.toByteArray(rowNumber))), + rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd").format(TIMESTAMP_VALUE)), + rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSSZ").format(TIMESTAMP_VALUE)), + rowNumber -> format("'ansi %d'", rowNumber), + rowNumber -> String.valueOf(rowNumber % 2 == 0), + rowNumber -> new BigDecimal(Math.pow(2, rowNumber)).toString(), + rowNumber -> String.valueOf(Math.pow(4, rowNumber)), + rowNumber -> String.valueOf((float) Math.pow(8, rowNumber)), + rowNumber -> format("'%s'", "127.0.0.1"), + rowNumber -> format("'varchar %d'", rowNumber), + rowNumber -> BigInteger.TEN.pow(rowNumber).toString(), + rowNumber -> format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber), + rowNumber -> format("['list-value-1%d', 'list-value-2%d']", rowNumber, rowNumber), + rowNumber -> format("{%d:%d, %d:%d}", rowNumber, rowNumber + 1, rowNumber + 2, rowNumber + 3), + rowNumber -> format("{false, true}"))))) { + execute("DROP TABLE IF EXISTS table_all_types_copy"); + execute("CREATE TABLE table_all_types_copy AS SELECT * FROM " 
+ testCassandraTable.getTableName()); + assertSelect("table_all_types_copy", true); + execute("DROP TABLE table_all_types_copy"); + } + } + + @Test + public void testIdentifiers() + { + String catalogName = getSession().getCatalog().orElseThrow(); + session.execute("DROP KEYSPACE IF EXISTS \"_keyspace\""); + session.execute("CREATE KEYSPACE \"_keyspace\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("_keyspace") + .build(), new Duration(1, MINUTES)); + + execute("CREATE TABLE _keyspace._table AS SELECT 1 AS \"_col\", 2 AS \"2col\""); + assertQuery(format("SHOW TABLES FROM %s._keyspace", catalogName), "VALUES ('_table')"); + assertQuery(format("SELECT * FROM %s._keyspace._table", catalogName), "VALUES (1, 2)"); + assertUpdate(format("DROP TABLE %s._keyspace._table", catalogName)); + + session.execute("DROP KEYSPACE \"_keyspace\""); + } + + @Test + public void testClusteringPredicates() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_clustering_keys", + ImmutableList.of( + partitionColumn("key", "text"), + clusterColumn("clust_one", "text"), + clusterColumn("clust_two", "text"), + clusterColumn("clust_three", "text"), + generalColumn("data", "text")), + columnsValue(9, ImmutableList.of( + rowNumber -> format("'key_%d'", rowNumber), + rowNumber -> "'clust_one'", + rowNumber -> format("'clust_two_%d'", rowNumber), + rowNumber -> format("'clust_three_%d'", rowNumber), + rowNumber -> "null")))) { + String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one'"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + 
testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one!='clust_one'"; + assertEquals(execute(sql).getRowCount(), 0); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2','key_3','key_4') AND clust_one='clust_one' AND clust_two>'clust_two_1'"; + assertEquals(execute(sql).getRowCount(), 3); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND " + + "((clust_two='clust_two_1') OR (clust_two='clust_two_2'))"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND " + + "((clust_two='clust_two_1' AND clust_three='clust_three_1') OR (clust_two='clust_two_2' AND clust_three='clust_three_2'))"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND clust_three='clust_three_1'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2')"; + assertEquals(execute(sql).getRowCount(), 2); + } + } + + @Test + public void testMultiplePartitionClusteringPredicates() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_multi_partition_clustering_keys", + ImmutableList.of( + partitionColumn("partition_one", "text"), + partitionColumn("partition_two", "text"), + clusterColumn("clust_one", "text"), + clusterColumn("clust_two", "text"), + clusterColumn("clust_three", "text"), + generalColumn("data", "text")), + columnsValue(9, ImmutableList.of( + rowNumber -> format("'partition_one_%d'", rowNumber), + rowNumber -> format("'partition_two_%d'", rowNumber), + rowNumber -> "'clust_one'", + rowNumber -> format("'clust_two_%d'", rowNumber), + rowNumber -> 
format("'clust_three_%d'", rowNumber), + rowNumber -> "null")))) { + String partitionInPredicates = " partition_one IN ('partition_one_1','partition_one_2') AND partition_two IN ('partition_two_1','partition_two_2') "; + String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE partition_one='partition_one_1' AND partition_two='partition_two_1' AND clust_one='clust_one'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one'"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE partition_one='partition_one_1' AND partition_two='partition_two_1' AND clust_one!='clust_one'"; + assertEquals(execute(sql).getRowCount(), 0); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + + "partition_one IN ('partition_one_1','partition_one_2','partition_one_3','partition_one_4') AND " + + "partition_two IN ('partition_two_1','partition_two_2','partition_two_3','partition_two_4') AND " + + "clust_one='clust_one' AND clust_two>'clust_two_1'"; + assertEquals(execute(sql).getRowCount(), 3); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND " + + "((clust_two='clust_two_1') OR (clust_two='clust_two_2'))"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND " + + "((clust_two='clust_two_1' AND clust_three='clust_three_1') OR (clust_two='clust_two_2' AND clust_three='clust_three_2'))"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND clust_three='clust_three_1'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + 
testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2')"; + assertEquals(execute(sql).getRowCount(), 2); + } + } + + @Test + public void testClusteringKeyOnlyPushdown() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_clustering_keys", + ImmutableList.of( + partitionColumn("key", "text"), + clusterColumn("clust_one", "text"), + clusterColumn("clust_two", "text"), + clusterColumn("clust_three", "text"), + generalColumn("data", "text")), + columnsValue(9, ImmutableList.of( + rowNumber -> format("'key_%d'", rowNumber), + rowNumber -> "'clust_one'", + rowNumber -> format("'clust_two_%d'", rowNumber), + rowNumber -> format("'clust_three_%d'", rowNumber), + rowNumber -> "null")))) { + String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one'"; + assertEquals(execute(sql).getRowCount(), 9); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2' AND clust_three='clust_three_2'"; + assertEquals(execute(sql).getRowCount(), 1); + } + + try (TestCassandraTable testCassandraTable = testTable( + "table_clustering_keys", + ImmutableList.of( + partitionColumn("key", "text"), + clusterColumn("clust_one", "text"), + clusterColumn("clust_two", "text"), + clusterColumn("clust_three", "text"), + generalColumn("data", "text")), + columnsValue(1000, ImmutableList.of( + rowNumber -> format("'key_%d'", rowNumber), + rowNumber -> "'clust_one'", + rowNumber -> format("'clust_two_%d'", rowNumber), + rowNumber -> format("'clust_three_%d'", rowNumber), + rowNumber -> "null")))) { + // below test cases are needed to verify clustering key pushdown with unpartitioned table + // for the smaller table (<200 partitions 
by default) connector fetches all the partition ids + and the partitioned path is being followed + String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2' AND clust_three='clust_three_2'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2' AND clust_three IN ('clust_three_1', 'clust_three_2', 'clust_three_3')"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three IN ('clust_three_1', 'clust_three_2', 'clust_three_3')"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two > 'clust_two_998'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two > 'clust_two_997' AND clust_two < 'clust_two_999'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three > 'clust_three_998'"; + assertEquals(execute(sql).getRowCount(), 0); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three < 'clust_three_3'"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three > 'clust_three_1' AND clust_three 
< 'clust_three_3'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2','clust_two_3') AND clust_two < 'clust_two_2'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_997','clust_two_998','clust_two_999') AND clust_two > 'clust_two_998'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2','clust_two_3') AND clust_two = 'clust_two_2'"; + assertEquals(execute(sql).getRowCount(), 1); + } + } + + @Test + public void testNotEqualPredicateOnClusteringColumn() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_clustering_keys_inequality", + ImmutableList.of( + partitionColumn("key", "text"), + clusterColumn("clust_one", "text"), + clusterColumn("clust_two", "int"), + clusterColumn("clust_three", "timestamp"), + generalColumn("data", "text")), + columnsValue(4, ImmutableList.of( + rowNumber -> "'key_1'", + rowNumber -> "'clust_one'", + rowNumber -> format("%d", rowNumber), + rowNumber -> format("%d", Timestamp.from(TIMESTAMP_VALUE.toInstant()).getTime() + rowNumber * 10), + rowNumber -> "null")))) { + String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one != 'clust_one'"; + assertEquals(execute(sql).getRowCount(), 0); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two != 2"; + assertEquals(execute(sql).getRowCount(), 3); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two >= 2 AND clust_two != 3"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + 
testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two > 2 AND clust_two != 3"; + assertEquals(execute(sql).getRowCount(), 1); + } + } + + @Test + public void testClusteringKeyPushdownInequality() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_clustering_keys_inequality", + ImmutableList.of( + partitionColumn("key", "text"), + clusterColumn("clust_one", "text"), + clusterColumn("clust_two", "int"), + clusterColumn("clust_three", "timestamp"), + generalColumn("data", "text")), + columnsValue(4, ImmutableList.of( + rowNumber -> "'key_1'", + rowNumber -> "'clust_one'", + rowNumber -> format("%d", rowNumber), + rowNumber -> format("%d", Timestamp.from(TIMESTAMP_VALUE.toInstant()).getTime() + rowNumber * 10), + rowNumber -> "null")))) { + String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one'"; + assertEquals(execute(sql).getRowCount(), 4); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2 AND clust_three = timestamp '1970-01-01 03:04:05.020Z'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2 AND clust_three = timestamp '1970-01-01 03:04:05.010Z'"; + assertEquals(execute(sql).getRowCount(), 0); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2)"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two > 1 AND clust_two < 3"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * 
FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2 AND clust_three >= timestamp '1970-01-01 03:04:05.010Z' AND clust_three <= timestamp '1970-01-01 03:04:05.020Z'"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2) AND clust_three >= timestamp '1970-01-01 03:04:05.010Z' AND clust_three <= timestamp '1970-01-01 03:04:05.020Z'"; + assertEquals(execute(sql).getRowCount(), 2); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2,3) AND clust_two < 2"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2,3) AND clust_two > 2"; + assertEquals(execute(sql).getRowCount(), 1); + sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2,3) AND clust_two = 2"; + assertEquals(execute(sql).getRowCount(), 1); + } + } + + @Test + public void testUpperCaseNameUnescapedInCassandra() + { + /* + * If an identifier is not escaped with double quotes it is stored as lowercase in the Cassandra metadata + * + * http://docs.datastax.com/en/cql/3.1/cql/cql_reference/ucase-lcase_r.html + */ + String catalogName = getSession().getCatalog().orElseThrow(); + session.execute("CREATE KEYSPACE KEYSPACE_1 WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("keyspace_1") + .build(), new Duration(1, MINUTES)); + + session.execute("CREATE TABLE KEYSPACE_1.TABLE_1 (COLUMN_1 bigint PRIMARY KEY)"); + assertContainsEventually(() -> execute(format("SHOW TABLES FROM %s.keyspace_1", 
catalogName)), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("table_1") + .build(), new Duration(1, MINUTES)); + assertContains(execute(format("SHOW COLUMNS FROM %s.keyspace_1.table_1", catalogName)), resultBuilder(getSession(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType()) + .row("column_1", "bigint", "", "") + .build()); + + execute("INSERT INTO keyspace_1.table_1 (column_1) VALUES (1)"); + + assertEquals(execute(format("SELECT column_1 FROM %s.keyspace_1.table_1", catalogName)).getRowCount(), 1); + assertUpdate(format("DROP TABLE %s.keyspace_1.table_1", catalogName)); + + // when an identifier is unquoted the lowercase and uppercase spelling may be used interchangeably + session.execute("DROP KEYSPACE keyspace_1"); + } + + @Test + public void testUppercaseNameEscaped() + { + /* + * If an identifier is escaped with double quotes it is stored verbatim + * + * http://docs.datastax.com/en/cql/3.1/cql/cql_reference/ucase-lcase_r.html + */ + String catalogName = getSession().getCatalog().orElseThrow(); + session.execute("CREATE KEYSPACE \"KEYSPACE_2\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("keyspace_2") + .build(), new Duration(1, MINUTES)); + + session.execute("CREATE TABLE \"KEYSPACE_2\".\"TABLE_2\" (\"COLUMN_2\" bigint PRIMARY KEY)"); + assertContainsEventually(() -> execute(format("SHOW TABLES FROM %s.keyspace_2", catalogName)), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("table_2") + .build(), new Duration(1, MINUTES)); + assertContains(execute(format("SHOW COLUMNS FROM %s.keyspace_2.table_2", catalogName)), resultBuilder(getSession(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType()) + .row("column_2", 
"bigint", "", "") + .build()); + + execute("INSERT INTO \"KEYSPACE_2\".\"TABLE_2\" (\"COLUMN_2\") VALUES (1)"); + + assertEquals(execute(format("SELECT column_2 FROM %s.keyspace_2.table_2", catalogName)).getRowCount(), 1); + assertUpdate(format("DROP TABLE %s.keyspace_2.table_2", catalogName)); + + // when an identifier is unquoted the lowercase and uppercase spelling may be used interchangeably + session.execute("DROP KEYSPACE \"KEYSPACE_2\""); + } + + @Test + public void testKeyspaceNameAmbiguity() + { + // Identifiers enclosed in double quotes are stored in Cassandra verbatim. It is possible to create 2 keyspaces with names + // that differ only in letter case. + String catalogName = getSession().getCatalog().orElseThrow(); + session.execute("CREATE KEYSPACE \"KeYsPaCe_3\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + session.execute("CREATE KEYSPACE \"kEySpAcE_3\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + + // Although in Trino all the schema and table names are always displayed as lowercase + assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("keyspace_3") + .row("keyspace_3") + .build(), new Duration(1, MINUTES)); + + // There is no way to figure out exactly which keyspace we want to retrieve tables from + assertQueryFailsEventually( + format("SHOW TABLES FROM %s.keyspace_3", catalogName), + "More than one keyspace has been found for the case insensitive schema name: keyspace_3 -> \\(KeYsPaCe_3, kEySpAcE_3\\)", + new Duration(1, MINUTES)); + + session.execute("DROP KEYSPACE \"KeYsPaCe_3\""); + session.execute("DROP KEYSPACE \"kEySpAcE_3\""); + } + + @Test + public void testTableNameAmbiguity() + { + String catalogName = getSession().getCatalog().orElseThrow(); + session.execute("CREATE KEYSPACE keyspace_4 WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + 
assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("keyspace_4") + .build(), new Duration(1, MINUTES)); + + // Identifiers enclosed in double quotes are stored in Cassandra verbatim. It is possible to create 2 tables with names + // that differ only in letter case. + session.execute("CREATE TABLE keyspace_4.\"TaBlE_4\" (column_4 bigint PRIMARY KEY)"); + session.execute("CREATE TABLE keyspace_4.\"tAbLe_4\" (column_4 bigint PRIMARY KEY)"); + + // Although in Trino all the schema and table names are always displayed as lowercase + assertContainsEventually(() -> execute(format("SHOW TABLES FROM %s.keyspace_4", catalogName)), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("table_4") + .row("table_4") + .build(), new Duration(1, MINUTES)); + + // There is no way to figure out exactly which table is being queried + assertQueryFailsEventually( + format("SHOW COLUMNS FROM %s.keyspace_4.table_4", catalogName), + "More than one table has been found for the case insensitive table name: table_4 -> \\(TaBlE_4, tAbLe_4\\)", + new Duration(1, MINUTES)); + assertQueryFailsEventually( + format("SELECT * FROM %s.keyspace_4.table_4", catalogName), + "More than one table has been found for the case insensitive table name: table_4 -> \\(TaBlE_4, tAbLe_4\\)", + new Duration(1, MINUTES)); + session.execute("DROP KEYSPACE keyspace_4"); + } + + @Test + public void testColumnNameAmbiguity() + { + String catalogName = getSession().getCatalog().orElseThrow(); + + session.execute("CREATE KEYSPACE keyspace_5 WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("keyspace_5") + .build(), new Duration(1, MINUTES)); + + session.execute("CREATE TABLE keyspace_5.table_5 (\"CoLuMn_5\" bigint PRIMARY KEY, \"cOlUmN_5\" 
bigint)"); + assertContainsEventually(() -> execute(format("SHOW TABLES FROM %s.keyspace_5", catalogName)), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("table_5") + .build(), new Duration(1, MINUTES)); + + assertQueryFailsEventually( + format("SHOW COLUMNS FROM %s.keyspace_5.table_5", catalogName), + "More than one column has been found for the case insensitive column name: column_5 -> \\(CoLuMn_5, cOlUmN_5\\)", + new Duration(1, MINUTES)); + assertQueryFailsEventually( + format("SELECT * FROM %s.keyspace_5.table_5", catalogName), + "More than one column has been found for the case insensitive column name: column_5 -> \\(CoLuMn_5, cOlUmN_5\\)", + new Duration(1, MINUTES)); + + session.execute("DROP KEYSPACE keyspace_5"); + } + + @Test + public void testUnsupportedColumnType() + { + // TODO currently all standard types are supported to some extent. We should add a test with custom type if possible. + } + + @Test + public void testNullAndEmptyTimestamp() + { + try (TestCassandraTable testCassandraTable = testTable( + "test_empty_timestamp", + ImmutableList.of( + partitionColumn("id", "int"), + generalColumn("timestamp_column_with_null", "timestamp"), + generalColumn("timestamp_column_with_empty", "timestamp")), + ImmutableList.of("1, NULL, ''"))) { + String tableName = testCassandraTable.getTableName(); + + assertThat(query(format("SELECT timestamp_column_with_null FROM %s", tableName))) + .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); + assertThat(query(format("SELECT timestamp_column_with_empty FROM %s", tableName))) + .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); + + assertThat(query(format("SELECT id FROM %s WHERE timestamp_column_with_null IS NULL", tableName))) + .matches("VALUES 1"); + assertThat(query(format("SELECT id FROM %s WHERE timestamp_column_with_empty IS NULL", tableName))) + .matches("VALUES 1"); + } + String catalogName = getSession().getCatalog().orElseThrow(); + String tableName = 
"test_empty_timestamp"; + + session.execute(format("DROP TABLE IF EXISTS %s.%s", KEYSPACE, tableName)); + session.execute(format("CREATE TABLE %s.%s (id int PRIMARY KEY, timestamp_column_with_null timestamp, timestamp_column_with_empty timestamp)", KEYSPACE, tableName)); + session.execute(format("INSERT INTO %s.%s (id, timestamp_column_with_null, timestamp_column_with_empty) VALUES (1, NULL, '')", KEYSPACE, tableName)); + assertContainsEventually(() -> execute(format("SHOW TABLES FROM %s.%s LIKE '%s'", catalogName, KEYSPACE, tableName)), resultBuilder(getSession(), createUnboundedVarcharType()) + .row(tableName) + .build(), new Duration(1, MINUTES)); + + assertThat(query(format("SELECT timestamp_column_with_null FROM %s.%s", KEYSPACE, tableName))) + .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); + assertThat(query(format("SELECT timestamp_column_with_empty FROM %s.%s", KEYSPACE, tableName))) + .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); + + assertThat(query(format("SELECT id FROM %s.%s WHERE timestamp_column_with_null IS NULL", KEYSPACE, tableName))) + .matches("VALUES 1"); + assertThat(query(format("SELECT id FROM %s.%s WHERE timestamp_column_with_empty IS NULL", KEYSPACE, tableName))) + .matches("VALUES 1"); + + session.execute(format("DROP TABLE %s.%s", KEYSPACE, tableName)); + } + + @Test + public void testEmptyTimestampClusteringKey() + { + try (TestCassandraTable testCassandraTable = testTable( + "test_empty_timestamp", + ImmutableList.of( + partitionColumn("id", "int"), + partitionColumn("timestamp_column_with_empty", "timestamp")), + ImmutableList.of("1, ''"))) { + String tableName = testCassandraTable.getTableName(); + + assertThat(query(format("SELECT timestamp_column_with_empty FROM %s", tableName))) + .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); + + assertThat(query(format("SELECT id FROM %s WHERE timestamp_column_with_empty IS NULL", tableName))) + .matches("VALUES 1"); + } + } + + @Test + public void 
testNestedCollectionType() + { + String catalogName = getSession().getCatalog().orElseThrow(); + + session.execute("CREATE KEYSPACE keyspace_test_nested_collection WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + assertContainsEventually(() -> execute("SHOW SCHEMAS FROM " + catalogName), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("keyspace_test_nested_collection") + .build(), new Duration(1, MINUTES)); + + session.execute("CREATE TABLE keyspace_test_nested_collection.table_set (column_5 bigint PRIMARY KEY, nested_collection frozen>>)"); + session.execute("CREATE TABLE keyspace_test_nested_collection.table_list (column_5 bigint PRIMARY KEY, nested_collection frozen>>)"); + session.execute("CREATE TABLE keyspace_test_nested_collection.table_map (column_5 bigint PRIMARY KEY, nested_collection frozen>>)"); + + assertContainsEventually(() -> execute(format("SHOW TABLES FROM %s.keyspace_test_nested_collection", catalogName)), resultBuilder(getSession(), createUnboundedVarcharType()) + .row("table_set") + .row("table_list") + .row("table_map") + .build(), new Duration(1, MINUTES)); + + session.execute("INSERT INTO keyspace_test_nested_collection.table_set (column_5, nested_collection) VALUES (1, {{1, 2, 3}})"); + assertEquals(execute(format("SELECT nested_collection FROM %s.keyspace_test_nested_collection.table_set", catalogName)).getMaterializedRows().get(0), + new MaterializedRow(DEFAULT_PRECISION, "[[1,2,3]]")); + + session.execute("INSERT INTO keyspace_test_nested_collection.table_list (column_5, nested_collection) VALUES (1, [[4, 5, 6]])"); + assertEquals(execute(format("SELECT nested_collection FROM %s.keyspace_test_nested_collection.table_list", catalogName)).getMaterializedRows().get(0), + new MaterializedRow(DEFAULT_PRECISION, "[[4,5,6]]")); + + session.execute("INSERT INTO keyspace_test_nested_collection.table_map (column_5, nested_collection) VALUES (1, {7:{8:9}})"); + assertEquals(execute(format("SELECT 
nested_collection FROM %s.keyspace_test_nested_collection.table_map", catalogName)).getMaterializedRows().get(0), + new MaterializedRow(DEFAULT_PRECISION, "{7:{8:9}}")); + + session.execute("DROP KEYSPACE keyspace_test_nested_collection"); + } + + @Test + public void testAllTypesInsert() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_all_types_insert", + ImmutableList.of( + partitionColumn("key", "text"), + generalColumn("typeuuid", "uuid"), + generalColumn("typetinyint", "tinyint"), + generalColumn("typesmallint", "smallint"), + generalColumn("typeinteger", "int"), + generalColumn("typelong", "bigint"), + generalColumn("typebytes", "blob"), + generalColumn("typedate", "date"), + generalColumn("typetimestamp", "timestamp"), + generalColumn("typeansi", "ascii"), + generalColumn("typeboolean", "boolean"), + generalColumn("typedecimal", "decimal"), + generalColumn("typedouble", "double"), + generalColumn("typefloat", "float"), + generalColumn("typeinet", "inet"), + generalColumn("typevarchar", "varchar"), + generalColumn("typevarint", "varint"), + generalColumn("typetimeuuid", "timeuuid"), + generalColumn("typelist", "frozen >"), + generalColumn("typemap", "frozen >"), + generalColumn("typeset", "frozen >")), + ImmutableList.of())) { + String sql = "SELECT key, typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal, " + + "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + + " FROM " + testCassandraTable.getTableName(); + assertEquals(execute(sql).getRowCount(), 0); + + // TODO Following types are not supported now. 
We need to change null into the value after fixing it + // blob, frozen>, inet, list, map, set, decimal, varint + // timestamp can be inserted but the expected and actual values are not same + execute("INSERT INTO " + testCassandraTable.getTableName() + " (" + + "key," + + "typeuuid," + + "typeinteger," + + "typelong," + + "typebytes," + + "typetimestamp," + + "typeansi," + + "typeboolean," + + "typedecimal," + + "typedouble," + + "typefloat," + + "typeinet," + + "typevarchar," + + "typevarint," + + "typetimeuuid," + + "typelist," + + "typemap," + + "typeset" + + ") VALUES (" + + "'key1', " + + "UUID '12151fd2-7586-11e9-8f9e-2a86e4085a59', " + + "1, " + + "1000, " + + "null, " + + "timestamp '1970-01-01 08:34:05.0Z', " + + "'ansi1', " + + "true, " + + "null, " + + "0.3, " + + "cast('0.4' as real), " + + "null, " + + "'varchar1', " + + "null, " + + "UUID '50554d6e-29bb-11e5-b345-feff819cdc9f', " + + "null, " + + "null, " + + "null " + + ")"); + + MaterializedResult result = execute(sql); + int rowCount = result.getRowCount(); + assertEquals(rowCount, 1); + assertEquals(result.getMaterializedRows().get(0), new MaterializedRow(DEFAULT_PRECISION, + "key1", + java.util.UUID.fromString("12151fd2-7586-11e9-8f9e-2a86e4085a59"), + 1, + 1000L, + null, + ZonedDateTime.of(1970, 1, 1, 8, 34, 5, 0, ZoneId.of("UTC")), + "ansi1", + true, + null, + 0.3, + (float) 0.4, + null, + "varchar1", + null, + java.util.UUID.fromString("50554d6e-29bb-11e5-b345-feff819cdc9f"), + null, + null, + null)); + + // insert null for all datatypes + execute("INSERT INTO " + testCassandraTable.getTableName() + " (" + + "key, typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal," + + "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + + ") VALUES (" + + "'key2', null, null, null, null, null, null, null, null," + + "null, null, null, null, null, null, null, null, null)"); + sql = "SELECT key, typeuuid, typeinteger, 
typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal, " + + "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + + " FROM " + testCassandraTable.getTableName() + " WHERE key = 'key2'"; + result = execute(sql); + rowCount = result.getRowCount(); + assertEquals(rowCount, 1); + assertEquals(result.getMaterializedRows().get(0), new MaterializedRow(DEFAULT_PRECISION, + "key2", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)); + + // insert into only a subset of columns + execute("INSERT INTO " + testCassandraTable.getTableName() + " (" + + "key, typeinteger, typeansi, typeboolean) VALUES (" + + "'key3', 999, 'ansi', false)"); + sql = "SELECT key, typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal, " + + "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + + " FROM " + testCassandraTable.getTableName() + " WHERE key = 'key3'"; + result = execute(sql); + rowCount = result.getRowCount(); + assertEquals(rowCount, 1); + assertEquals(result.getMaterializedRows().get(0), new MaterializedRow(DEFAULT_PRECISION, + "key3", null, 999, null, null, null, "ansi", false, null, null, null, null, null, null, null, null, null, null)); + } + } + + @Test + @Override + public void testDelete() + { + try (TestCassandraTable testCassandraTable = testTable( + "table_delete_data", + ImmutableList.of( + partitionColumn("partition_one", "bigint"), + partitionColumn("partition_two", "int"), + clusterColumn("clust_one", "text"), + generalColumn("data", "text")), + ImmutableList.of( + "1, 1, 'clust_one_1', null", + "2, 2, 'clust_one_2', null", + "3, 3, 'clust_one_3', null", + "4, 4, 'clust_one_4', null", + "5, 5, 'clust_one_5', null", + "6, 6, 'clust_one_6', null", + "7, 7, 'clust_one_7', null", + "8, 8, 'clust_one_8', null", + "9, 9, 'clust_one_9', null", + "1, 1, 'clust_one_2', null", + 
"1, 1, 'clust_one_3', null", + "1, 2, 'clust_one_1', null", + "1, 2, 'clust_one_2', null", + "1, 2, 'clust_one_3', null", + "2, 2, 'clust_one_1', null"))) { + String keyspaceAndTable = testCassandraTable.getTableName(); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); + + // error + assertThatThrownBy(() -> execute("DELETE FROM " + keyspaceAndTable)) + .isInstanceOf(RuntimeException.class); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); + + String whereClusteringKeyOnly = " WHERE clust_one='clust_one_2'"; + assertThatThrownBy(() -> execute("DELETE FROM " + keyspaceAndTable + whereClusteringKeyOnly)) + .isInstanceOf(RuntimeException.class); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); + + String whereMultiplePartitionKeyWithClusteringKey = " WHERE " + + " (partition_one=1 AND partition_two=1 AND clust_one='clust_one_1') OR " + + " (partition_one=1 AND partition_two=2 AND clust_one='clust_one_2') "; + assertThatThrownBy(() -> execute("DELETE FROM " + keyspaceAndTable + whereMultiplePartitionKeyWithClusteringKey)) + .isInstanceOf(RuntimeException.class); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); + + // success + String wherePrimaryKey = " WHERE partition_one=3 AND partition_two=3 AND clust_one='clust_one_3'"; + execute("DELETE FROM " + keyspaceAndTable + wherePrimaryKey); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 14); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable + wherePrimaryKey).getRowCount(), 0); + + String wherePartitionKey = " WHERE partition_one=2 AND partition_two=2"; + execute("DELETE FROM " + keyspaceAndTable + wherePartitionKey); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 12); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable + wherePartitionKey).getRowCount(), 0); + + String whereMultiplePartitionKey = " WHERE (partition_one=1 AND 
partition_two=1) OR (partition_one=1 AND partition_two=2)"; + execute("DELETE FROM " + keyspaceAndTable + whereMultiplePartitionKey); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 6); + assertEquals(execute("SELECT * FROM " + keyspaceAndTable + whereMultiplePartitionKey).getRowCount(), 0); + } + } + + @Override + public void testDeleteWithLike() + { + assertThatThrownBy(super::testDeleteWithLike) + .hasStackTraceContaining("Delete without primary key or partition key is not supported"); + } + + @Override + public void testDeleteWithComplexPredicate() + { + assertThatThrownBy(super::testDeleteWithComplexPredicate) + .hasStackTraceContaining("Delete without primary key or partition key is not supported"); + } + + @Override + public void testDeleteWithSemiJoin() + { + assertThatThrownBy(super::testDeleteWithSemiJoin) + .hasStackTraceContaining("Delete without primary key or partition key is not supported"); + } + + @Override + public void testDeleteWithSubquery() + { + assertThatThrownBy(super::testDeleteWithSubquery) + .hasStackTraceContaining("Delete without primary key or partition key is not supported"); + } + + @Override + public void testExplainAnalyzeWithDeleteWithSubquery() + { + assertThatThrownBy(super::testExplainAnalyzeWithDeleteWithSubquery) + .hasStackTraceContaining("Delete without primary key or partition key is not supported"); + } + + @Override + public void testDeleteWithVarcharPredicate() + { + assertThatThrownBy(super::testDeleteWithVarcharPredicate) + .hasStackTraceContaining("Delete without primary key or partition key is not supported"); + } + + @Override + public void testDeleteAllDataFromTable() + { + assertThatThrownBy(super::testDeleteAllDataFromTable) + .hasStackTraceContaining("Deleting without partition key is not supported"); + } + + @Override + public void testRowLevelDelete() + { + assertThatThrownBy(super::testRowLevelDelete) + .hasStackTraceContaining("Delete without primary key or partition key is not 
supported"); + } + + private void assertSelect(String tableName, boolean createdByTrino) + { + Type inetType = createdByTrino ? createUnboundedVarcharType() : createVarcharType(45); + + String sql = "SELECT " + + " key, " + + " typeuuid, " + + " typeinteger, " + + " typelong, " + + " typebytes, " + + " typetimestamp, " + + " typeansi, " + + " typeboolean, " + + " typedecimal, " + + " typedouble, " + + " typefloat, " + + " typeinet, " + + " typevarchar, " + + " typevarint, " + + " typetimeuuid, " + + " typelist, " + + " typemap, " + + " typeset " + + " FROM " + tableName; + + MaterializedResult result = execute(sql); + + int rowCount = result.getRowCount(); + assertEquals(rowCount, 9); + assertEquals(result.getTypes(), ImmutableList.of( + createUnboundedVarcharType(), + UUID, + INTEGER, + BIGINT, + VARBINARY, + TIMESTAMP_WITH_TIME_ZONE, + createUnboundedVarcharType(), + BOOLEAN, + DOUBLE, + DOUBLE, + REAL, + inetType, + createUnboundedVarcharType(), + createUnboundedVarcharType(), + UUID, + createUnboundedVarcharType(), + createUnboundedVarcharType(), + createUnboundedVarcharType())); + + List sortedRows = result.getMaterializedRows().stream() + .sorted(comparing(o -> o.getField(1).toString())) + .collect(toList()); + + for (int rowNumber = 1; rowNumber <= rowCount; rowNumber++) { + assertEquals(sortedRows.get(rowNumber - 1), new MaterializedRow(DEFAULT_PRECISION, + "key " + rowNumber, + java.util.UUID.fromString(format("00000000-0000-0000-0000-%012d", rowNumber)), + rowNumber, + rowNumber + 1000L, + Bytes.fromBytes(Ints.toByteArray(rowNumber)), + TIMESTAMP_VALUE, + "ansi " + rowNumber, + rowNumber % 2 == 0, + Math.pow(2, rowNumber), + Math.pow(4, rowNumber), + (float) Math.pow(8, rowNumber), + "127.0.0.1", + "varchar " + rowNumber, + BigInteger.TEN.pow(rowNumber).toString(), + java.util.UUID.fromString(format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber)), + format("[\"list-value-1%1$d\",\"list-value-2%1$d\"]", rowNumber), + format("{%d:%d,%d:%d}", rowNumber, 
rowNumber + 1L, rowNumber + 2, rowNumber + 3L), + "[false,true]")); + } + } + + protected void refreshSizeEstimates(String keyspace, String tableName) + throws Exception + { + server.refreshSizeEstimates(KEYSPACE, tableName); + } + + protected MaterializedResult execute(String sql) + { + return getQueryRunner().execute(SESSION, sql); + } + + private TestCassandraTable testTable(String namePrefix, List columnDefinitions, List rowsToInsert) + { + return new TestCassandraTable(session::execute, server, KEYSPACE, namePrefix, columnDefinitions, rowsToInsert); + } +} diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraQueryRunner.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraQueryRunner.java index 758feddd9c93..52fefc8bfde1 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraQueryRunner.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraQueryRunner.java @@ -84,7 +84,7 @@ public static void main(String[] args) throws Exception { DistributedQueryRunner queryRunner = createCassandraQueryRunner( - new CassandraServer(), + new TestingCassandraServer(), ImmutableMap.of("http-server.http.port", "8080"), ImmutableMap.of(), TpchTable.getTables()); diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraServer.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraServer.java index f1cc33f78390..72faa07d0b39 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraServer.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/CassandraServer.java @@ -13,180 +13,17 @@ */ package io.trino.plugin.cassandra; -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.CqlSessionBuilder; -import com.datastax.oss.driver.api.core.ProtocolVersion; -import com.datastax.oss.driver.api.core.config.DriverConfigLoader; -import 
com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.google.common.io.Resources; -import io.airlift.json.JsonCodec; -import io.airlift.log.Logger; -import io.airlift.units.Duration; -import org.testcontainers.containers.GenericContainer; - import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.file.Path; -import java.util.List; -import java.util.concurrent.TimeoutException; - -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_VERSION; -import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; -import static com.google.common.io.Files.write; -import static com.google.common.io.Resources.getResource; -import static java.lang.String.format; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.nio.file.Files.createDirectory; -import static java.nio.file.Files.createTempDirectory; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.TimeUnit.MINUTES; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.testcontainers.utility.MountableFile.forHostPath; -import static org.testng.Assert.assertEquals; -public class CassandraServer - implements Closeable +public interface CassandraServer + extends Closeable { - private static Logger log = Logger.get(CassandraServer.class); - - private static final int PORT = 9142; - - private static final Duration REFRESH_SIZE_ESTIMATES_TIMEOUT = new Duration(1, MINUTES); - - private final GenericContainer 
dockerContainer; - private final CassandraSession session; - - public CassandraServer() - throws Exception - { - this("2.2"); - } - - public CassandraServer(String cassandraVersion) - throws Exception - { - log.info("Starting cassandra..."); - - this.dockerContainer = new GenericContainer<>("cassandra:" + cassandraVersion) - .withExposedPorts(PORT) - .withCopyFileToContainer(forHostPath(prepareCassandraYaml()), "/etc/cassandra/cassandra.yaml"); - this.dockerContainer.start(); - - ProgrammaticDriverConfigLoaderBuilder driverConfigLoaderBuilder = DriverConfigLoader.programmaticBuilder(); - driverConfigLoaderBuilder.withDuration(REQUEST_TIMEOUT, java.time.Duration.ofSeconds(12)); - driverConfigLoaderBuilder.withString(PROTOCOL_VERSION, ProtocolVersion.V3.name()); - driverConfigLoaderBuilder.withDuration(CONTROL_CONNECTION_AGREEMENT_TIMEOUT, java.time.Duration.ofSeconds(30)); - // allow the retrieval of metadata for the system keyspaces - driverConfigLoaderBuilder.withStringList(METADATA_SCHEMA_REFRESHED_KEYSPACES, List.of()); - - CqlSessionBuilder cqlSessionBuilder = CqlSession.builder() - .withApplicationName("TestCluster") - .addContactPoint(new InetSocketAddress(this.dockerContainer.getContainerIpAddress(), this.dockerContainer.getMappedPort(PORT))) - .withLocalDatacenter("datacenter1") - .withConfigLoader(driverConfigLoaderBuilder.build()); - - CassandraSession session = new CassandraSession( - JsonCodec.listJsonCodec(ExtraColumnMetadata.class), - cqlSessionBuilder::build, - new Duration(1, MINUTES)); - - try { - checkConnectivity(session); - } - catch (RuntimeException e) { - session.close(); - this.dockerContainer.stop(); - throw e; - } - - this.session = session; - } - - private static String prepareCassandraYaml() - throws IOException - { - String original = Resources.toString(getResource("cu-cassandra.yaml"), UTF_8); - - Path tmpDirPath = createTempDirectory(null); - Path dataDir = tmpDirPath.resolve("data"); - createDirectory(dataDir); - - String modified = 
original.replaceAll("\\$\\{data_directory\\}", dataDir.toAbsolutePath().toString()); - - File yamlFile = tmpDirPath.resolve("cu-cassandra.yaml").toFile(); - yamlFile.deleteOnExit(); - write(modified, yamlFile, UTF_8); - - return yamlFile.getAbsolutePath(); - } - - public CassandraSession getSession() - { - return requireNonNull(session, "session is null"); - } - - public String getHost() - { - return dockerContainer.getContainerIpAddress(); - } - - public int getPort() - { - return dockerContainer.getMappedPort(PORT); - } - - private static void checkConnectivity(CassandraSession session) - { - ResultSet result = session.execute("SELECT release_version FROM system.local"); - List rows = result.all(); - assertEquals(rows.size(), 1); - String version = rows.get(0).getString(0); - log.info("Cassandra version: %s", version); - } - - public void refreshSizeEstimates(String keyspace, String table) - throws Exception - { - long deadline = System.nanoTime() + REFRESH_SIZE_ESTIMATES_TIMEOUT.roundTo(NANOSECONDS); - while (System.nanoTime() - deadline < 0) { - flushTable(keyspace, table); - refreshSizeEstimates(); - List sizeEstimates = getSession().getSizeEstimates(keyspace, table); - if (!sizeEstimates.isEmpty()) { - log.info("Size estimates for the table %s.%s have been refreshed successfully: %s", keyspace, table, sizeEstimates); - return; - } - log.info("Size estimates haven't been refreshed as expected. 
Retrying ..."); - SECONDS.sleep(1); - } - throw new TimeoutException(format("Attempting to refresh size estimates for table %s.%s has timed out after %s", keyspace, table, REFRESH_SIZE_ESTIMATES_TIMEOUT)); - } + CassandraSession getSession(); - private void flushTable(String keyspace, String table) - throws Exception - { - dockerContainer.execInContainer("nodetool", "flush", keyspace, table); - } + String getHost(); - private void refreshSizeEstimates() - throws Exception - { - dockerContainer.execInContainer("nodetool", "refreshsizeestimates"); - } + int getPort(); - @Override - public void close() - { - if (session != null) { - session.close(); - } - dockerContainer.close(); - } + void refreshSizeEstimates(String keyspace, String table) + throws Exception; } diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java index 5d35ceedee8b..d18c36007e50 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java @@ -97,7 +97,7 @@ public class TestCassandraConnector private static final ConnectorSession SESSION = TestingConnectorSession.builder() .setPropertyMetadata(new CassandraSessionProperties(new CassandraClientConfig()).getSessionProperties()) .build(); - private CassandraServer server; + private TestingCassandraServer server; protected String database; protected SchemaTableName table; protected SchemaTableName tableForDelete; @@ -111,7 +111,7 @@ public class TestCassandraConnector public void setup() throws Exception { - this.server = new CassandraServer(); + this.server = new TestingCassandraServer(); String keyspace = "test_connector"; createTestTables(server.getSession(), keyspace, DATE); diff --git 
a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java index fc1f3023866e..20d8f99b87f1 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java @@ -13,1392 +13,24 @@ */ package io.trino.plugin.cassandra; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.google.common.primitives.Ints; -import io.airlift.units.Duration; -import io.trino.Session; -import io.trino.spi.type.Type; -import io.trino.testing.BaseConnectorTest; -import io.trino.testing.Bytes; -import io.trino.testing.MaterializedResult; -import io.trino.testing.MaterializedRow; import io.trino.testing.QueryRunner; -import io.trino.testing.TestingConnectorBehavior; -import io.trino.testing.assertions.Assert; -import io.trino.testing.sql.TestTable; -import org.testng.SkipException; -import org.testng.annotations.Test; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.nio.ByteBuffer; import java.sql.Timestamp; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.util.List; -import java.util.Optional; -import static com.datastax.oss.driver.api.core.data.ByteUtils.toHexString; -import static com.google.common.io.BaseEncoding.base16; import static io.trino.plugin.cassandra.CassandraQueryRunner.createCassandraQueryRunner; -import static io.trino.plugin.cassandra.CassandraQueryRunner.createCassandraSession; -import static io.trino.plugin.cassandra.TestCassandraTable.clusterColumn; -import static io.trino.plugin.cassandra.TestCassandraTable.columnsValue; -import static io.trino.plugin.cassandra.TestCassandraTable.generalColumn; -import static 
io.trino.plugin.cassandra.TestCassandraTable.partitionColumn; -import static io.trino.spi.type.BigintType.BIGINT; -import static io.trino.spi.type.BooleanType.BOOLEAN; -import static io.trino.spi.type.DoubleType.DOUBLE; -import static io.trino.spi.type.IntegerType.INTEGER; -import static io.trino.spi.type.RealType.REAL; -import static io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_WITH_TIME_ZONE; -import static io.trino.spi.type.UuidType.UUID; -import static io.trino.spi.type.VarbinaryType.VARBINARY; -import static io.trino.spi.type.VarcharType.VARCHAR; -import static io.trino.spi.type.VarcharType.createUnboundedVarcharType; -import static io.trino.spi.type.VarcharType.createVarcharType; -import static io.trino.testing.MaterializedResult.DEFAULT_PRECISION; -import static io.trino.testing.MaterializedResult.resultBuilder; -import static io.trino.testing.QueryAssertions.assertContains; -import static io.trino.testing.QueryAssertions.assertContainsEventually; -import static java.lang.String.format; -import static java.util.Comparator.comparing; -import static java.util.concurrent.TimeUnit.MINUTES; -import static java.util.stream.Collectors.toList; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.testng.Assert.assertEquals; +import static io.trino.plugin.cassandra.CassandraTestingUtils.createTestTables; public class TestCassandraConnectorTest - extends BaseConnectorTest + extends BaseCassandraConnectorTest { - private static final String KEYSPACE = "smoke_test"; - private static final Session SESSION = createCassandraSession(KEYSPACE); - - private static final ZonedDateTime TIMESTAMP_VALUE = ZonedDateTime.of(1970, 1, 1, 3, 4, 5, 0, ZoneId.of("UTC")); - - private CassandraServer server; - private CassandraSession session; - - @Override - protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) - { - switch (connectorBehavior) { - case 
SUPPORTS_TRUNCATE: - return true; - - case SUPPORTS_CREATE_SCHEMA: - return false; - - case SUPPORTS_CREATE_VIEW: - return false; - - case SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT: - case SUPPORTS_CREATE_TABLE_WITH_COLUMN_COMMENT: - return false; - - case SUPPORTS_RENAME_TABLE: - return false; - - case SUPPORTS_ARRAY: - case SUPPORTS_ROW_TYPE: - return false; - - case SUPPORTS_ADD_COLUMN: - case SUPPORTS_DROP_COLUMN: - case SUPPORTS_RENAME_COLUMN: - return false; - - case SUPPORTS_COMMENT_ON_TABLE: - case SUPPORTS_COMMENT_ON_COLUMN: - return false; - - case SUPPORTS_TOPN_PUSHDOWN: - return false; - - case SUPPORTS_NOT_NULL_CONSTRAINT: - return false; - - case SUPPORTS_DELETE: - return true; - - default: - return super.hasBehavior(connectorBehavior); - } - } - @Override protected QueryRunner createQueryRunner() throws Exception { - server = closeAfterClass(new CassandraServer()); + server = closeAfterClass(new TestingCassandraServer()); session = server.getSession(); - session.execute("CREATE KEYSPACE IF NOT EXISTS " + KEYSPACE + " WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); + createTestTables(session, KEYSPACE, Timestamp.from(TIMESTAMP_VALUE.toInstant())); return createCassandraQueryRunner(server, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); } - - @Override - protected TestTable createTableWithDefaultColumns() - { - throw new SkipException("Cassandra connector does not support column default values"); - } - - @Override - protected Optional filterDataMappingSmokeTestData(DataMappingTestSetup dataMappingTestSetup) - { - String typeName = dataMappingTestSetup.getTrinoTypeName(); - if (typeName.equals("time") - || typeName.equals("timestamp") - || typeName.equals("decimal(5,3)") - || typeName.equals("decimal(15,3)") - || typeName.equals("char(3)")) { - // TODO this should either work or fail cleanly - return Optional.empty(); - } - return Optional.of(dataMappingTestSetup); - } - - @Override - protected Optional 
filterCaseSensitiveDataMappingTestData(DataMappingTestSetup dataMappingTestSetup) - { - String typeName = dataMappingTestSetup.getTrinoTypeName(); - if (typeName.equals("char(1)")) { - // TODO this should either work or fail cleanly - return Optional.empty(); - } - return Optional.of(dataMappingTestSetup); - } - - @Override - protected String dataMappingTableName(String trinoTypeName) - { - return "tmp_trino_" + System.nanoTime(); - } - - @Test - @Override - public void testShowColumns() - { - MaterializedResult actual = computeActual("SHOW COLUMNS FROM orders"); - - MaterializedResult expectedParametrizedVarchar = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR) - .row("orderkey", "bigint", "", "") - .row("custkey", "bigint", "", "") - .row("orderstatus", "varchar", "", "") - .row("totalprice", "double", "", "") - .row("orderdate", "date", "", "") - .row("orderpriority", "varchar", "", "") - .row("clerk", "varchar", "", "") - .row("shippriority", "integer", "", "") - .row("comment", "varchar", "", "") - .build(); - - Assert.assertEquals(actual, expectedParametrizedVarchar); - } - - @Test - @Override - public void testDescribeTable() - { - MaterializedResult expectedColumns = resultBuilder(getSession(), VARCHAR, VARCHAR, VARCHAR, VARCHAR) - .row("orderkey", "bigint", "", "") - .row("custkey", "bigint", "", "") - .row("orderstatus", "varchar", "", "") - .row("totalprice", "double", "", "") - .row("orderdate", "date", "", "") - .row("orderpriority", "varchar", "", "") - .row("clerk", "varchar", "", "") - .row("shippriority", "integer", "", "") - .row("comment", "varchar", "", "") - .build(); - MaterializedResult actualColumns = computeActual("DESCRIBE orders"); - Assert.assertEquals(actualColumns, expectedColumns); - } - - @Test - @Override - public void testShowCreateTable() - { - assertThat(computeActual("SHOW CREATE TABLE orders").getOnlyValue()) - .isEqualTo("CREATE TABLE cassandra.tpch.orders (\n" + - " orderkey bigint,\n" + - " custkey bigint,\n" 
+ - " orderstatus varchar,\n" + - " totalprice double,\n" + - " orderdate date,\n" + - " orderpriority varchar,\n" + - " clerk varchar,\n" + - " shippriority integer,\n" + - " comment varchar\n" + - ")"); - } - - @Override - public void testCharVarcharComparison() - { - assertThatThrownBy(super::testCharVarcharComparison) - .hasMessage("Unsupported type: char(3)"); - } - - @Test - public void testPushdownUuidPartitionKeyPredicate() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_pushdown_uuid_partition_key", - ImmutableList.of(partitionColumn("col_uuid", "uuid"), generalColumn("col_text", "text")), - ImmutableList.of("00000000-0000-0000-0000-000000000001, 'Trino'"))) { - assertThat(query(format("SELECT col_text FROM %s WHERE col_uuid = UUID '00000000-0000-0000-0000-000000000001'", testCassandraTable.getTableName()))) - .matches("VALUES CAST('Trino' AS varchar)"); - } - } - - @Test - public void testPushdownAllTypesPartitionKeyPredicate() - { - // TODO partition key predicate pushdown for decimal types does not work https://github.com/trinodb/trino/issues/10927 - try (TestCassandraTable testCassandraTable = testTable( - "table_pushdown_all_types_partition_key", - ImmutableList.of( - partitionColumn("key", "text"), - partitionColumn("typeuuid", "uuid"), - partitionColumn("typetinyint", "tinyint"), - partitionColumn("typesmallint", "smallint"), - partitionColumn("typeinteger", "int"), - partitionColumn("typelong", "bigint"), - generalColumn("typebytes", "blob"), - partitionColumn("typedate", "date"), - partitionColumn("typetimestamp", "timestamp"), - partitionColumn("typeansi", "ascii"), - partitionColumn("typeboolean", "boolean"), - generalColumn("typedecimal", "decimal"), - partitionColumn("typedouble", "double"), - partitionColumn("typefloat", "float"), - partitionColumn("typeinet", "inet"), - partitionColumn("typevarchar", "varchar"), - generalColumn("typevarint", "varint"), - partitionColumn("typetimeuuid", "timeuuid"), - 
generalColumn("typelist", "frozen >"), - generalColumn("typemap", "frozen >"), - generalColumn("typeset", "frozen >")), - ImmutableList.of("" + - "'key 7', " + - "00000000-0000-0000-0000-000000000007, " + - "7, " + - "7, " + - "7, " + - "1007, " + - "0x00000007, " + - "'1970-01-01', " + - "'1970-01-01 03:04:05.000+0000', " + - "'ansi 7', " + - "false, " + - "128.0, " + - "16384.0, " + - "2097152.0, " + - "'127.0.0.1', " + - "'varchar 7', " + - "10000000, " + - "d2177dd0-eaa2-11de-a572-001b779c76e7, " + - "['list-value-17', 'list-value-27'], " + - "{7:8, 9:10}, " + - "{false, true}"))) { - String sql = "SELECT *" + - " FROM " + testCassandraTable.getTableName() + - " WHERE key = 'key 7'" + - " AND typeuuid = UUID '00000000-0000-0000-0000-000000000007'" + - " AND typetinyint = 7" + - " AND typesmallint = 7" + - " AND typeinteger = 7" + - " AND typelong = 1007" + - " AND typedate = DATE '1970-01-01'" + - " AND typetimestamp = TIMESTAMP '1970-01-01 03:04:05Z'" + - " AND typeansi = 'ansi 7'" + - " AND typeboolean = false" + - " AND typedouble = 16384.0" + - " AND typefloat = REAL '2097152.0'" + - " AND typeinet = '127.0.0.1'" + - " AND typevarchar = 'varchar 7'" + - " AND typetimeuuid = UUID 'd2177dd0-eaa2-11de-a572-001b779c76e7'" + - ""; - MaterializedResult result = execute(sql); - - assertEquals(result.getRowCount(), 1); - } - } - - @Test - public void testPartitionPushdownsWithNotMatchingPredicate() - { - try (TestCassandraTable testCassandraTable = testTable( - "partition_not_pushed_down_keys", - ImmutableList.of(partitionColumn("id", "varchar"), generalColumn("trino_filter_col", "int")), - ImmutableList.of("'2', 0"))) { - String sql = "SELECT 1 FROM " + testCassandraTable.getTableName() + " WHERE id = '1' AND trino_filter_col = 0"; - - assertThat(execute(sql).getMaterializedRows().size()).isEqualTo(0); - } - } - - @Test - public void testPartitionKeyPredicate() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_all_types_partition_key", - 
ImmutableList.of( - partitionColumn("key", "text"), - partitionColumn("typeuuid", "uuid"), - partitionColumn("typetinyint", "tinyint"), - partitionColumn("typesmallint", "smallint"), - partitionColumn("typeinteger", "int"), - partitionColumn("typelong", "bigint"), - partitionColumn("typebytes", "blob"), - partitionColumn("typedate", "date"), - partitionColumn("typetimestamp", "timestamp"), - partitionColumn("typeansi", "ascii"), - partitionColumn("typeboolean", "boolean"), - partitionColumn("typedecimal", "decimal"), - partitionColumn("typedouble", "double"), - partitionColumn("typefloat", "float"), - partitionColumn("typeinet", "inet"), - partitionColumn("typevarchar", "varchar"), - partitionColumn("typevarint", "varint"), - partitionColumn("typetimeuuid", "timeuuid"), - partitionColumn("typelist", "frozen >"), - partitionColumn("typemap", "frozen >"), - partitionColumn("typeset", "frozen >")), - ImmutableList.of("" + - "'key 7', " + - "00000000-0000-0000-0000-000000000007, " + - "7, " + - "7, " + - "7, " + - "1007, " + - "0x00000007, " + - "'1970-01-01', " + - "'1970-01-01 03:04:05.000+0000', " + - "'ansi 7', " + - "false, " + - "128.0, " + - "16384.0, " + - "2097152.0, " + - "'127.0.0.1', " + - "'varchar 7', " + - "10000000, " + - "d2177dd0-eaa2-11de-a572-001b779c76e7, " + - "['list-value-17', 'list-value-27'], " + - "{7:8, 9:10}, " + - "{false, true}"))) { - String sql = "SELECT *" + - " FROM " + testCassandraTable.getTableName() + - " WHERE key = 'key 7'" + - " AND typeuuid = UUID '00000000-0000-0000-0000-000000000007'" + - " AND typetinyint = 7" + - " AND typesmallint = 7" + - " AND typeinteger = 7" + - " AND typelong = 1007" + - " AND typebytes = from_hex('" + base16().encode(Ints.toByteArray(7)) + "')" + - " AND typedate = DATE '1970-01-01'" + - " AND typetimestamp = TIMESTAMP '1970-01-01 03:04:05Z'" + - " AND typeansi = 'ansi 7'" + - " AND typeboolean = false" + - " AND typedecimal = 128.0" + - " AND typedouble = 16384.0" + - " AND typefloat = REAL 
'2097152.0'" + - " AND typeinet = '127.0.0.1'" + - " AND typevarchar = 'varchar 7'" + - " AND typevarint = '10000000'" + - " AND typetimeuuid = UUID 'd2177dd0-eaa2-11de-a572-001b779c76e7'" + - " AND typelist = '[\"list-value-17\",\"list-value-27\"]'" + - " AND typemap = '{7:8,9:10}'" + - " AND typeset = '[false,true]'" + - ""; - MaterializedResult result = execute(sql); - - assertEquals(result.getRowCount(), 1); - } - } - - @Test - public void testTimestampPartitionKey() - { - try (TestCassandraTable testCassandraTable = testTable( - "test_timestamp", - ImmutableList.of(partitionColumn("c1", "timestamp")), - ImmutableList.of("'2017-04-01T11:21:59.001+0000'"))) { - String sql = format( - "SELECT * " + - "FROM %s " + - "WHERE c1 = TIMESTAMP '2017-04-01 11:21:59.001 UTC'", testCassandraTable.getTableName()); - MaterializedResult result = execute(sql); - - assertEquals(result.getRowCount(), 1); - } - } - - @Test - public void testSelect() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_all_types", - ImmutableList.of( - partitionColumn("key", "text"), - generalColumn("typeuuid", "uuid"), - generalColumn("typetinyint", "tinyint"), - generalColumn("typesmallint", "smallint"), - generalColumn("typeinteger", "int"), - generalColumn("typelong", "bigint"), - generalColumn("typebytes", "blob"), - generalColumn("typedate", "date"), - generalColumn("typetimestamp", "timestamp"), - generalColumn("typeansi", "ascii"), - generalColumn("typeboolean", "boolean"), - generalColumn("typedecimal", "decimal"), - generalColumn("typedouble", "double"), - generalColumn("typefloat", "float"), - generalColumn("typeinet", "inet"), - generalColumn("typevarchar", "varchar"), - generalColumn("typevarint", "varint"), - generalColumn("typetimeuuid", "timeuuid"), - generalColumn("typelist", "frozen >"), - generalColumn("typemap", "frozen >"), - generalColumn("typeset", "frozen >")), - columnsValue(9, ImmutableList.of( - rowNumber -> format("'key %d'", rowNumber), - rowNumber -> 
format("00000000-0000-0000-0000-%012d", rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber + 1000), - rowNumber -> toHexString(ByteBuffer.wrap(Ints.toByteArray(rowNumber)).asReadOnlyBuffer()), - rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd").format(TIMESTAMP_VALUE)), - rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSSZ").format(TIMESTAMP_VALUE)), - rowNumber -> format("'ansi %d'", rowNumber), - rowNumber -> String.valueOf(rowNumber % 2 == 0), - rowNumber -> new BigDecimal(Math.pow(2, rowNumber)).toString(), - rowNumber -> String.valueOf(Math.pow(4, rowNumber)), - rowNumber -> String.valueOf((float) Math.pow(8, rowNumber)), - rowNumber -> format("'%s'", "127.0.0.1"), - rowNumber -> format("'varchar %d'", rowNumber), - rowNumber -> BigInteger.TEN.pow(rowNumber).toString(), - rowNumber -> format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber), - rowNumber -> format("['list-value-1%d', 'list-value-2%d']", rowNumber, rowNumber), - rowNumber -> format("{%d:%d, %d:%d}", rowNumber, rowNumber + 1, rowNumber + 2, rowNumber + 3), - rowNumber -> format("{false, true}"))))) { - assertSelect(testCassandraTable.getTableName(), false); - } - - try (TestCassandraTable testCassandraTable = testTable( - "table_all_types_partition_key", - ImmutableList.of( - partitionColumn("key", "text"), - partitionColumn("typeuuid", "uuid"), - partitionColumn("typetinyint", "tinyint"), - partitionColumn("typesmallint", "smallint"), - partitionColumn("typeinteger", "int"), - partitionColumn("typelong", "bigint"), - partitionColumn("typebytes", "blob"), - partitionColumn("typedate", "date"), - partitionColumn("typetimestamp", "timestamp"), - partitionColumn("typeansi", "ascii"), - partitionColumn("typeboolean", "boolean"), - partitionColumn("typedecimal", "decimal"), - partitionColumn("typedouble", "double"), - 
partitionColumn("typefloat", "float"), - partitionColumn("typeinet", "inet"), - partitionColumn("typevarchar", "varchar"), - partitionColumn("typevarint", "varint"), - partitionColumn("typetimeuuid", "timeuuid"), - partitionColumn("typelist", "frozen >"), - partitionColumn("typemap", "frozen >"), - partitionColumn("typeset", "frozen >")), - columnsValue(9, ImmutableList.of( - rowNumber -> format("'key %d'", rowNumber), - rowNumber -> format("00000000-0000-0000-0000-%012d", rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber + 1000), - rowNumber -> toHexString(ByteBuffer.wrap(Ints.toByteArray(rowNumber))), - rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd").format(TIMESTAMP_VALUE)), - rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSSZ").format(TIMESTAMP_VALUE)), - rowNumber -> format("'ansi %d'", rowNumber), - rowNumber -> String.valueOf(rowNumber % 2 == 0), - rowNumber -> new BigDecimal(Math.pow(2, rowNumber)).toString(), - rowNumber -> String.valueOf(Math.pow(4, rowNumber)), - rowNumber -> String.valueOf((float) Math.pow(8, rowNumber)), - rowNumber -> format("'%s'", "127.0.0.1"), - rowNumber -> format("'varchar %d'", rowNumber), - rowNumber -> BigInteger.TEN.pow(rowNumber).toString(), - rowNumber -> format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber), - rowNumber -> format("['list-value-1%d', 'list-value-2%d']", rowNumber, rowNumber), - rowNumber -> format("{%d:%d, %d:%d}", rowNumber, rowNumber + 1, rowNumber + 2, rowNumber + 3), - rowNumber -> format("{false, true}"))))) { - assertSelect(testCassandraTable.getTableName(), false); - } - } - - @Test - public void testInsertToTableWithHiddenId() - { - execute("DROP TABLE IF EXISTS test_create_table"); - execute("CREATE TABLE test_create_table (col1 integer)"); - execute("INSERT INTO test_create_table VALUES (12345)"); - 
assertQuery("SELECT * FROM smoke_test.test_create_table", "VALUES (12345)"); - execute("DROP TABLE test_create_table"); - } - - @Test - public void testCreateTableAs() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_all_types", - ImmutableList.of( - partitionColumn("key", "text"), - generalColumn("typeuuid", "uuid"), - generalColumn("typetinyint", "tinyint"), - generalColumn("typesmallint", "smallint"), - generalColumn("typeinteger", "int"), - generalColumn("typelong", "bigint"), - generalColumn("typebytes", "blob"), - generalColumn("typedate", "date"), - generalColumn("typetimestamp", "timestamp"), - generalColumn("typeansi", "ascii"), - generalColumn("typeboolean", "boolean"), - generalColumn("typedecimal", "decimal"), - generalColumn("typedouble", "double"), - generalColumn("typefloat", "float"), - generalColumn("typeinet", "inet"), - generalColumn("typevarchar", "varchar"), - generalColumn("typevarint", "varint"), - generalColumn("typetimeuuid", "timeuuid"), - generalColumn("typelist", "frozen >"), - generalColumn("typemap", "frozen >"), - generalColumn("typeset", "frozen >")), - columnsValue(9, ImmutableList.of( - rowNumber -> format("'key %d'", rowNumber), - rowNumber -> format("00000000-0000-0000-0000-%012d", rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber), - rowNumber -> String.valueOf(rowNumber + 1000), - rowNumber -> toHexString(ByteBuffer.wrap(Ints.toByteArray(rowNumber))), - rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd").format(TIMESTAMP_VALUE)), - rowNumber -> format("'%s'", DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSSZ").format(TIMESTAMP_VALUE)), - rowNumber -> format("'ansi %d'", rowNumber), - rowNumber -> String.valueOf(rowNumber % 2 == 0), - rowNumber -> new BigDecimal(Math.pow(2, rowNumber)).toString(), - rowNumber -> String.valueOf(Math.pow(4, rowNumber)), - rowNumber -> String.valueOf((float) Math.pow(8, 
rowNumber)), - rowNumber -> format("'%s'", "127.0.0.1"), - rowNumber -> format("'varchar %d'", rowNumber), - rowNumber -> BigInteger.TEN.pow(rowNumber).toString(), - rowNumber -> format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber), - rowNumber -> format("['list-value-1%d', 'list-value-2%d']", rowNumber, rowNumber), - rowNumber -> format("{%d:%d, %d:%d}", rowNumber, rowNumber + 1, rowNumber + 2, rowNumber + 3), - rowNumber -> format("{false, true}"))))) { - execute("DROP TABLE IF EXISTS table_all_types_copy"); - execute("CREATE TABLE table_all_types_copy AS SELECT * FROM " + testCassandraTable.getTableName()); - assertSelect("table_all_types_copy", true); - execute("DROP TABLE table_all_types_copy"); - } - } - - @Test - public void testIdentifiers() - { - session.execute("DROP KEYSPACE IF EXISTS \"_keyspace\""); - session.execute("CREATE KEYSPACE \"_keyspace\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("_keyspace") - .build(), new Duration(1, MINUTES)); - - execute("CREATE TABLE _keyspace._table AS SELECT 1 AS \"_col\", 2 AS \"2col\""); - assertQuery("SHOW TABLES FROM cassandra._keyspace", "VALUES ('_table')"); - assertQuery("SELECT * FROM cassandra._keyspace._table", "VALUES (1, 2)"); - assertUpdate("DROP TABLE cassandra._keyspace._table"); - - session.execute("DROP KEYSPACE \"_keyspace\""); - } - - @Test - public void testClusteringPredicates() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_clustering_keys", - ImmutableList.of( - partitionColumn("key", "text"), - clusterColumn("clust_one", "text"), - clusterColumn("clust_two", "text"), - clusterColumn("clust_three", "text"), - generalColumn("data", "text")), - columnsValue(9, ImmutableList.of( - rowNumber -> format("'key_%d'", rowNumber), - rowNumber -> "'clust_one'", - rowNumber -> format("'clust_two_%d'", 
rowNumber), - rowNumber -> format("'clust_three_%d'", rowNumber), - rowNumber -> "null")))) { - String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one'"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one!='clust_one'"; - assertEquals(execute(sql).getRowCount(), 0); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2','key_3','key_4') AND clust_one='clust_one' AND clust_two>'clust_two_1'"; - assertEquals(execute(sql).getRowCount(), 3); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND " + - "((clust_two='clust_two_1') OR (clust_two='clust_two_2'))"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND " + - "((clust_two='clust_two_1' AND clust_three='clust_three_1') OR (clust_two='clust_two_2' AND clust_three='clust_three_2'))"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND clust_three='clust_three_1'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key IN ('key_1','key_2') AND clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2')"; - assertEquals(execute(sql).getRowCount(), 2); - } - } - - @Test - public void testMultiplePartitionClusteringPredicates() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_multi_partition_clustering_keys", - ImmutableList.of( - 
partitionColumn("partition_one", "text"), - partitionColumn("partition_two", "text"), - clusterColumn("clust_one", "text"), - clusterColumn("clust_two", "text"), - clusterColumn("clust_three", "text"), - generalColumn("data", "text")), - columnsValue(9, ImmutableList.of( - rowNumber -> format("'partition_one_%d'", rowNumber), - rowNumber -> format("'partition_two_%d'", rowNumber), - rowNumber -> "'clust_one'", - rowNumber -> format("'clust_two_%d'", rowNumber), - rowNumber -> format("'clust_three_%d'", rowNumber), - rowNumber -> "null")))) { - String partitionInPredicates = " partition_one IN ('partition_one_1','partition_one_2') AND partition_two IN ('partition_two_1','partition_two_2') "; - String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE partition_one='partition_one_1' AND partition_two='partition_two_1' AND clust_one='clust_one'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one'"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE partition_one='partition_one_1' AND partition_two='partition_two_1' AND clust_one!='clust_one'"; - assertEquals(execute(sql).getRowCount(), 0); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + - "partition_one IN ('partition_one_1','partition_one_2','partition_one_3','partition_one_4') AND " + - "partition_two IN ('partition_two_1','partition_two_2','partition_two_3','partition_two_4') AND " + - "clust_one='clust_one' AND clust_two>'clust_two_1'"; - assertEquals(execute(sql).getRowCount(), 3); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND " + - "((clust_two='clust_two_1') OR (clust_two='clust_two_2'))"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + 
" WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND " + - "((clust_two='clust_two_1' AND clust_three='clust_three_1') OR (clust_two='clust_two_2' AND clust_three='clust_three_2'))"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND clust_three='clust_three_1'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE " + partitionInPredicates + " AND clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2')"; - assertEquals(execute(sql).getRowCount(), 2); - } - } - - @Test - public void testClusteringKeyOnlyPushdown() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_clustering_keys", - ImmutableList.of( - partitionColumn("key", "text"), - clusterColumn("clust_one", "text"), - clusterColumn("clust_two", "text"), - clusterColumn("clust_three", "text"), - generalColumn("data", "text")), - columnsValue(9, ImmutableList.of( - rowNumber -> format("'key_%d'", rowNumber), - rowNumber -> "'clust_one'", - rowNumber -> format("'clust_two_%d'", rowNumber), - rowNumber -> format("'clust_three_%d'", rowNumber), - rowNumber -> "null")))) { - String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one'"; - assertEquals(execute(sql).getRowCount(), 9); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2' AND clust_three='clust_three_2'"; - assertEquals(execute(sql).getRowCount(), 1); - } - - try (TestCassandraTable testCassandraTable = testTable( - "table_clustering_keys", - ImmutableList.of( - partitionColumn("key", "text"), - clusterColumn("clust_one", "text"), - 
clusterColumn("clust_two", "text"), - clusterColumn("clust_three", "text"), - generalColumn("data", "text")), - columnsValue(1000, ImmutableList.of( - rowNumber -> format("'key_%d'", rowNumber), - rowNumber -> "'clust_one'", - rowNumber -> format("'clust_two_%d'", rowNumber), - rowNumber -> format("'clust_three_%d'", rowNumber), - rowNumber -> "null")))) { - // below test cases are needed to verify clustering key pushdown with unpartitioned table - // for the smaller table (<200 partitions by default) connector fetches all the partitions id - // and the partitioned patch is being followed - String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2' AND clust_three='clust_three_2'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two='clust_two_2' AND clust_three IN ('clust_three_1', 'clust_three_2', 'clust_three_3')"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three IN ('clust_three_1', 'clust_three_2', 'clust_three_3')"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two > 'clust_two_998'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two > 'clust_two_997' AND clust_two < 'clust_two_999'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND 
clust_three > 'clust_three_998'"; - assertEquals(execute(sql).getRowCount(), 0); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three < 'clust_three_3'"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2') AND clust_three > 'clust_three_1' AND clust_three < 'clust_three_3'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2','clust_two_3') AND clust_two < 'clust_two_2'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_997','clust_two_998','clust_two_999') AND clust_two > 'clust_two_998'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE clust_one='clust_one' AND clust_two IN ('clust_two_1','clust_two_2','clust_two_3') AND clust_two = 'clust_two_2'"; - assertEquals(execute(sql).getRowCount(), 1); - } - } - - @Test - public void testNotEqualPredicateOnClusteringColumn() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_clustering_keys_inequality", - ImmutableList.of( - partitionColumn("key", "text"), - clusterColumn("clust_one", "text"), - clusterColumn("clust_two", "int"), - clusterColumn("clust_three", "timestamp"), - generalColumn("data", "text")), - columnsValue(4, ImmutableList.of( - rowNumber -> "'key_1'", - rowNumber -> "'clust_one'", - rowNumber -> format("%d", rowNumber), - rowNumber -> format("%d", Timestamp.from(TIMESTAMP_VALUE.toInstant()).getTime() + rowNumber * 10), - rowNumber -> "null")))) { - String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE 
key='key_1' AND clust_one != 'clust_one'"; - assertEquals(execute(sql).getRowCount(), 0); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two != 2"; - assertEquals(execute(sql).getRowCount(), 3); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two >= 2 AND clust_two != 3"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two > 2 AND clust_two != 3"; - assertEquals(execute(sql).getRowCount(), 1); - } - } - - @Test - public void testClusteringKeyPushdownInequality() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_clustering_keys_inequality", - ImmutableList.of( - partitionColumn("key", "text"), - clusterColumn("clust_one", "text"), - clusterColumn("clust_two", "int"), - clusterColumn("clust_three", "timestamp"), - generalColumn("data", "text")), - columnsValue(4, ImmutableList.of( - rowNumber -> "'key_1'", - rowNumber -> "'clust_one'", - rowNumber -> format("%d", rowNumber), - rowNumber -> format("%d", Timestamp.from(TIMESTAMP_VALUE.toInstant()).getTime() + rowNumber * 10), - rowNumber -> "null")))) { - String sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one'"; - assertEquals(execute(sql).getRowCount(), 4); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2 AND clust_three = timestamp '1970-01-01 03:04:05.020Z'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2 AND 
clust_three = timestamp '1970-01-01 03:04:05.010Z'"; - assertEquals(execute(sql).getRowCount(), 0); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2)"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two > 1 AND clust_two < 3"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two=2 AND clust_three >= timestamp '1970-01-01 03:04:05.010Z' AND clust_three <= timestamp '1970-01-01 03:04:05.020Z'"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2) AND clust_three >= timestamp '1970-01-01 03:04:05.010Z' AND clust_three <= timestamp '1970-01-01 03:04:05.020Z'"; - assertEquals(execute(sql).getRowCount(), 2); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2,3) AND clust_two < 2"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2,3) AND clust_two > 2"; - assertEquals(execute(sql).getRowCount(), 1); - sql = "SELECT * FROM " + testCassandraTable.getTableName() + " WHERE key='key_1' AND clust_one='clust_one' AND clust_two IN (1,2,3) AND clust_two = 2"; - assertEquals(execute(sql).getRowCount(), 1); - } - } - - @Test - public void testUpperCaseNameUnescapedInCassandra() - { - /* - * If an identifier is not escaped with double quotes it is stored as lowercase in the Cassandra metadata - * - * http://docs.datastax.com/en/cql/3.1/cql/cql_reference/ucase-lcase_r.html - */ - session.execute("CREATE KEYSPACE KEYSPACE_1 WITH REPLICATION = 
{'class':'SimpleStrategy', 'replication_factor': 1}"); - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("keyspace_1") - .build(), new Duration(1, MINUTES)); - - session.execute("CREATE TABLE KEYSPACE_1.TABLE_1 (COLUMN_1 bigint PRIMARY KEY)"); - assertContainsEventually(() -> execute("SHOW TABLES FROM cassandra.keyspace_1"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("table_1") - .build(), new Duration(1, MINUTES)); - assertContains(execute("SHOW COLUMNS FROM cassandra.keyspace_1.table_1"), resultBuilder(getSession(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType()) - .row("column_1", "bigint", "", "") - .build()); - - execute("INSERT INTO keyspace_1.table_1 (column_1) VALUES (1)"); - - assertEquals(execute("SELECT column_1 FROM cassandra.keyspace_1.table_1").getRowCount(), 1); - assertUpdate("DROP TABLE cassandra.keyspace_1.table_1"); - - // when an identifier is unquoted the lowercase and uppercase spelling may be used interchangeable - session.execute("DROP KEYSPACE keyspace_1"); - } - - @Test - public void testUppercaseNameEscaped() - { - /* - * If an identifier is escaped with double quotes it is stored verbatim - * - * http://docs.datastax.com/en/cql/3.1/cql/cql_reference/ucase-lcase_r.html - */ - session.execute("CREATE KEYSPACE \"KEYSPACE_2\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("keyspace_2") - .build(), new Duration(1, MINUTES)); - - session.execute("CREATE TABLE \"KEYSPACE_2\".\"TABLE_2\" (\"COLUMN_2\" bigint PRIMARY KEY)"); - assertContainsEventually(() -> execute("SHOW TABLES FROM cassandra.keyspace_2"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("table_2") - .build(), new 
Duration(1, MINUTES)); - assertContains(execute("SHOW COLUMNS FROM cassandra.keyspace_2.table_2"), resultBuilder(getSession(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType(), createUnboundedVarcharType()) - .row("column_2", "bigint", "", "") - .build()); - - execute("INSERT INTO \"KEYSPACE_2\".\"TABLE_2\" (\"COLUMN_2\") VALUES (1)"); - - assertEquals(execute("SELECT column_2 FROM cassandra.keyspace_2.table_2").getRowCount(), 1); - assertUpdate("DROP TABLE cassandra.keyspace_2.table_2"); - - // when an identifier is unquoted the lowercase and uppercase spelling may be used interchangeable - session.execute("DROP KEYSPACE \"KEYSPACE_2\""); - } - - @Test - public void testKeyspaceNameAmbiguity() - { - // Identifiers enclosed in double quotes are stored in Cassandra verbatim. It is possible to create 2 keyspaces with names - // that have differences only in letters case. - session.execute("CREATE KEYSPACE \"KeYsPaCe_3\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - session.execute("CREATE KEYSPACE \"kEySpAcE_3\" WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - - // Although in Trino all the schema and table names are always displayed as lowercase - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("keyspace_3") - .row("keyspace_3") - .build(), new Duration(1, MINUTES)); - - // There is no way to figure out what the exactly keyspace we want to retrieve tables from - assertQueryFailsEventually( - "SHOW TABLES FROM cassandra.keyspace_3", - "More than one keyspace has been found for the case insensitive schema name: keyspace_3 -> \\(KeYsPaCe_3, kEySpAcE_3\\)", - new Duration(1, MINUTES)); - - session.execute("DROP KEYSPACE \"KeYsPaCe_3\""); - session.execute("DROP KEYSPACE \"kEySpAcE_3\""); - } - - @Test - public void testTableNameAmbiguity() - { - session.execute("CREATE KEYSPACE keyspace_4 
WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("keyspace_4") - .build(), new Duration(1, MINUTES)); - - // Identifiers enclosed in double quotes are stored in Cassandra verbatim. It is possible to create 2 tables with names - // that have differences only in letters case. - session.execute("CREATE TABLE keyspace_4.\"TaBlE_4\" (column_4 bigint PRIMARY KEY)"); - session.execute("CREATE TABLE keyspace_4.\"tAbLe_4\" (column_4 bigint PRIMARY KEY)"); - - // Although in Trino all the schema and table names are always displayed as lowercase - assertContainsEventually(() -> execute("SHOW TABLES FROM cassandra.keyspace_4"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("table_4") - .row("table_4") - .build(), new Duration(1, MINUTES)); - - // There is no way to figure out what the exactly table is being queried - assertQueryFailsEventually( - "SHOW COLUMNS FROM cassandra.keyspace_4.table_4", - "More than one table has been found for the case insensitive table name: table_4 -> \\(TaBlE_4, tAbLe_4\\)", - new Duration(1, MINUTES)); - assertQueryFailsEventually( - "SELECT * FROM cassandra.keyspace_4.table_4", - "More than one table has been found for the case insensitive table name: table_4 -> \\(TaBlE_4, tAbLe_4\\)", - new Duration(1, MINUTES)); - session.execute("DROP KEYSPACE keyspace_4"); - } - - @Test - public void testColumnNameAmbiguity() - { - session.execute("CREATE KEYSPACE keyspace_5 WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("keyspace_5") - .build(), new Duration(1, MINUTES)); - - session.execute("CREATE TABLE keyspace_5.table_5 (\"CoLuMn_5\" bigint PRIMARY KEY, \"cOlUmN_5\" bigint)"); - assertContainsEventually(() -> 
execute("SHOW TABLES FROM cassandra.keyspace_5"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("table_5") - .build(), new Duration(1, MINUTES)); - - assertQueryFailsEventually( - "SHOW COLUMNS FROM cassandra.keyspace_5.table_5", - "More than one column has been found for the case insensitive column name: column_5 -> \\(CoLuMn_5, cOlUmN_5\\)", - new Duration(1, MINUTES)); - assertQueryFailsEventually( - "SELECT * FROM cassandra.keyspace_5.table_5", - "More than one column has been found for the case insensitive column name: column_5 -> \\(CoLuMn_5, cOlUmN_5\\)", - new Duration(1, MINUTES)); - - session.execute("DROP KEYSPACE keyspace_5"); - } - - @Test - public void testUnsupportedColumnType() - { - // TODO currently all standard types are supported to some extent. We should add a test with custom type if possible. - } - - @Test - public void testNullAndEmptyTimestamp() - { - try (TestCassandraTable testCassandraTable = testTable( - "test_empty_timestamp", - ImmutableList.of( - partitionColumn("id", "int"), - generalColumn("timestamp_column_with_null", "timestamp"), - generalColumn("timestamp_column_with_empty", "timestamp")), - ImmutableList.of("1, NULL, ''"))) { - String tableName = testCassandraTable.getTableName(); - - assertThat(query(format("SELECT timestamp_column_with_null FROM %s", tableName))) - .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); - assertThat(query(format("SELECT timestamp_column_with_empty FROM %s", tableName))) - .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); - - assertThat(query(format("SELECT id FROM %s WHERE timestamp_column_with_null IS NULL", tableName))) - .matches("VALUES 1"); - assertThat(query(format("SELECT id FROM %s WHERE timestamp_column_with_empty IS NULL", tableName))) - .matches("VALUES 1"); - } - } - - @Test - public void testEmptyTimestampClusteringKey() - { - try (TestCassandraTable testCassandraTable = testTable( - "test_empty_timestamp", - ImmutableList.of( - 
partitionColumn("id", "int"), - partitionColumn("timestamp_column_with_empty", "timestamp")), - ImmutableList.of("1, ''"))) { - String tableName = testCassandraTable.getTableName(); - - assertThat(query(format("SELECT timestamp_column_with_empty FROM %s", tableName))) - .matches("VALUES CAST(NULL AS timestamp(3) with time zone)"); - - assertThat(query(format("SELECT id FROM %s WHERE timestamp_column_with_empty IS NULL", tableName))) - .matches("VALUES 1"); - } - } - - @Test - public void testNestedCollectionType() - { - session.execute("CREATE KEYSPACE keyspace_test_nested_collection WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor': 1}"); - assertContainsEventually(() -> execute("SHOW SCHEMAS FROM cassandra"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("keyspace_test_nested_collection") - .build(), new Duration(1, MINUTES)); - - session.execute("CREATE TABLE keyspace_test_nested_collection.table_set (column_5 bigint PRIMARY KEY, nested_collection frozen>>)"); - session.execute("CREATE TABLE keyspace_test_nested_collection.table_list (column_5 bigint PRIMARY KEY, nested_collection frozen>>)"); - session.execute("CREATE TABLE keyspace_test_nested_collection.table_map (column_5 bigint PRIMARY KEY, nested_collection frozen>>)"); - - assertContainsEventually(() -> execute("SHOW TABLES FROM cassandra.keyspace_test_nested_collection"), resultBuilder(getSession(), createUnboundedVarcharType()) - .row("table_set") - .row("table_list") - .row("table_map") - .build(), new Duration(1, MINUTES)); - - session.execute("INSERT INTO keyspace_test_nested_collection.table_set (column_5, nested_collection) VALUES (1, {{1, 2, 3}})"); - assertEquals(execute("SELECT nested_collection FROM cassandra.keyspace_test_nested_collection.table_set").getMaterializedRows().get(0), - new MaterializedRow(DEFAULT_PRECISION, "[[1,2,3]]")); - - session.execute("INSERT INTO keyspace_test_nested_collection.table_list (column_5, nested_collection) VALUES (1, [[4, 5, 
6]])"); - assertEquals(execute("SELECT nested_collection FROM cassandra.keyspace_test_nested_collection.table_list").getMaterializedRows().get(0), - new MaterializedRow(DEFAULT_PRECISION, "[[4,5,6]]")); - - session.execute("INSERT INTO keyspace_test_nested_collection.table_map (column_5, nested_collection) VALUES (1, {7:{8:9}})"); - assertEquals(execute("SELECT nested_collection FROM cassandra.keyspace_test_nested_collection.table_map").getMaterializedRows().get(0), - new MaterializedRow(DEFAULT_PRECISION, "{7:{8:9}}")); - - session.execute("DROP KEYSPACE keyspace_test_nested_collection"); - } - - @Test - public void testAllTypesInsert() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_all_types_insert", - ImmutableList.of( - partitionColumn("key", "text"), - generalColumn("typeuuid", "uuid"), - generalColumn("typetinyint", "tinyint"), - generalColumn("typesmallint", "smallint"), - generalColumn("typeinteger", "int"), - generalColumn("typelong", "bigint"), - generalColumn("typebytes", "blob"), - generalColumn("typedate", "date"), - generalColumn("typetimestamp", "timestamp"), - generalColumn("typeansi", "ascii"), - generalColumn("typeboolean", "boolean"), - generalColumn("typedecimal", "decimal"), - generalColumn("typedouble", "double"), - generalColumn("typefloat", "float"), - generalColumn("typeinet", "inet"), - generalColumn("typevarchar", "varchar"), - generalColumn("typevarint", "varint"), - generalColumn("typetimeuuid", "timeuuid"), - generalColumn("typelist", "frozen >"), - generalColumn("typemap", "frozen >"), - generalColumn("typeset", "frozen >")), - ImmutableList.of())) { - String sql = "SELECT key, typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal, " + - "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + - " FROM " + testCassandraTable.getTableName(); - assertEquals(execute(sql).getRowCount(), 0); - - // TODO Following types are not 
supported now. We need to change null into the value after fixing it - // blob, frozen>, inet, list, map, set, decimal, varint - // timestamp can be inserted but the expected and actual values are not same - execute("INSERT INTO " + testCassandraTable.getTableName() + " (" + - "key," + - "typeuuid," + - "typeinteger," + - "typelong," + - "typebytes," + - "typetimestamp," + - "typeansi," + - "typeboolean," + - "typedecimal," + - "typedouble," + - "typefloat," + - "typeinet," + - "typevarchar," + - "typevarint," + - "typetimeuuid," + - "typelist," + - "typemap," + - "typeset" + - ") VALUES (" + - "'key1', " + - "UUID '12151fd2-7586-11e9-8f9e-2a86e4085a59', " + - "1, " + - "1000, " + - "null, " + - "timestamp '1970-01-01 08:34:05.0Z', " + - "'ansi1', " + - "true, " + - "null, " + - "0.3, " + - "cast('0.4' as real), " + - "null, " + - "'varchar1', " + - "null, " + - "UUID '50554d6e-29bb-11e5-b345-feff819cdc9f', " + - "null, " + - "null, " + - "null " + - ")"); - - MaterializedResult result = execute(sql); - int rowCount = result.getRowCount(); - assertEquals(rowCount, 1); - assertEquals(result.getMaterializedRows().get(0), new MaterializedRow(DEFAULT_PRECISION, - "key1", - java.util.UUID.fromString("12151fd2-7586-11e9-8f9e-2a86e4085a59"), - 1, - 1000L, - null, - ZonedDateTime.of(1970, 1, 1, 8, 34, 5, 0, ZoneId.of("UTC")), - "ansi1", - true, - null, - 0.3, - (float) 0.4, - null, - "varchar1", - null, - java.util.UUID.fromString("50554d6e-29bb-11e5-b345-feff819cdc9f"), - null, - null, - null)); - - // insert null for all datatypes - execute("INSERT INTO " + testCassandraTable.getTableName() + " (" + - "key, typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal," + - "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + - ") VALUES (" + - "'key2', null, null, null, null, null, null, null, null," + - "null, null, null, null, null, null, null, null, null)"); - sql = "SELECT key, 
typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal, " + - "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + - " FROM " + testCassandraTable.getTableName() + " WHERE key = 'key2'"; - result = execute(sql); - rowCount = result.getRowCount(); - assertEquals(rowCount, 1); - assertEquals(result.getMaterializedRows().get(0), new MaterializedRow(DEFAULT_PRECISION, - "key2", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)); - - // insert into only a subset of columns - execute("INSERT INTO " + testCassandraTable.getTableName() + " (" + - "key, typeinteger, typeansi, typeboolean) VALUES (" + - "'key3', 999, 'ansi', false)"); - sql = "SELECT key, typeuuid, typeinteger, typelong, typebytes, typetimestamp, typeansi, typeboolean, typedecimal, " + - "typedouble, typefloat, typeinet, typevarchar, typevarint, typetimeuuid, typelist, typemap, typeset" + - " FROM " + testCassandraTable.getTableName() + " WHERE key = 'key3'"; - result = execute(sql); - rowCount = result.getRowCount(); - assertEquals(rowCount, 1); - assertEquals(result.getMaterializedRows().get(0), new MaterializedRow(DEFAULT_PRECISION, - "key3", null, 999, null, null, null, "ansi", false, null, null, null, null, null, null, null, null, null, null)); - } - } - - @Test - @Override - public void testDelete() - { - try (TestCassandraTable testCassandraTable = testTable( - "table_delete_data", - ImmutableList.of( - partitionColumn("partition_one", "bigint"), - partitionColumn("partition_two", "int"), - clusterColumn("clust_one", "text"), - generalColumn("data", "text")), - ImmutableList.of( - "1, 1, 'clust_one_1', null", - "2, 2, 'clust_one_2', null", - "3, 3, 'clust_one_3', null", - "4, 4, 'clust_one_4', null", - "5, 5, 'clust_one_5', null", - "6, 6, 'clust_one_6', null", - "7, 7, 'clust_one_7', null", - "8, 8, 'clust_one_8', null", - "9, 9, 'clust_one_9', null", - "1, 1, 
'clust_one_2', null", - "1, 1, 'clust_one_3', null", - "1, 2, 'clust_one_1', null", - "1, 2, 'clust_one_2', null", - "1, 2, 'clust_one_3', null", - "2, 2, 'clust_one_1', null"))) { - String keyspaceAndTable = testCassandraTable.getTableName(); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); - - // error - assertThatThrownBy(() -> execute("DELETE FROM " + keyspaceAndTable)) - .isInstanceOf(RuntimeException.class); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); - - String whereClusteringKeyOnly = " WHERE clust_one='clust_one_2'"; - assertThatThrownBy(() -> execute("DELETE FROM " + keyspaceAndTable + whereClusteringKeyOnly)) - .isInstanceOf(RuntimeException.class); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); - - String whereMultiplePartitionKeyWithClusteringKey = " WHERE " + - " (partition_one=1 AND partition_two=1 AND clust_one='clust_one_1') OR " + - " (partition_one=1 AND partition_two=2 AND clust_one='clust_one_2') "; - assertThatThrownBy(() -> execute("DELETE FROM " + keyspaceAndTable + whereMultiplePartitionKeyWithClusteringKey)) - .isInstanceOf(RuntimeException.class); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 15); - - // success - String wherePrimaryKey = " WHERE partition_one=3 AND partition_two=3 AND clust_one='clust_one_3'"; - execute("DELETE FROM " + keyspaceAndTable + wherePrimaryKey); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 14); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable + wherePrimaryKey).getRowCount(), 0); - - String wherePartitionKey = " WHERE partition_one=2 AND partition_two=2"; - execute("DELETE FROM " + keyspaceAndTable + wherePartitionKey); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 12); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable + wherePartitionKey).getRowCount(), 0); - - String whereMultiplePartitionKey = " WHERE 
(partition_one=1 AND partition_two=1) OR (partition_one=1 AND partition_two=2)"; - execute("DELETE FROM " + keyspaceAndTable + whereMultiplePartitionKey); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable).getRowCount(), 6); - assertEquals(execute("SELECT * FROM " + keyspaceAndTable + whereMultiplePartitionKey).getRowCount(), 0); - } - } - - @Override - public void testDeleteWithLike() - { - assertThatThrownBy(super::testDeleteWithLike) - .hasStackTraceContaining("Delete without primary key or partition key is not supported"); - } - - @Override - public void testDeleteWithComplexPredicate() - { - assertThatThrownBy(super::testDeleteWithComplexPredicate) - .hasStackTraceContaining("Delete without primary key or partition key is not supported"); - } - - @Override - public void testDeleteWithSemiJoin() - { - assertThatThrownBy(super::testDeleteWithSemiJoin) - .hasStackTraceContaining("Delete without primary key or partition key is not supported"); - } - - @Override - public void testDeleteWithSubquery() - { - assertThatThrownBy(super::testDeleteWithSubquery) - .hasStackTraceContaining("Delete without primary key or partition key is not supported"); - } - - @Override - public void testExplainAnalyzeWithDeleteWithSubquery() - { - assertThatThrownBy(super::testExplainAnalyzeWithDeleteWithSubquery) - .hasStackTraceContaining("Delete without primary key or partition key is not supported"); - } - - @Override - public void testDeleteWithVarcharPredicate() - { - assertThatThrownBy(super::testDeleteWithVarcharPredicate) - .hasStackTraceContaining("Delete without primary key or partition key is not supported"); - } - - @Override - public void testDeleteAllDataFromTable() - { - assertThatThrownBy(super::testDeleteAllDataFromTable) - .hasStackTraceContaining("Deleting without partition key is not supported"); - } - - @Override - public void testRowLevelDelete() - { - assertThatThrownBy(super::testRowLevelDelete) - .hasStackTraceContaining("Delete without primary key or 
partition key is not supported"); - } - - private void assertSelect(String tableName, boolean createdByTrino) - { - Type inetType = createdByTrino ? createUnboundedVarcharType() : createVarcharType(45); - - String sql = "SELECT " + - " key, " + - " typeuuid, " + - " typeinteger, " + - " typelong, " + - " typebytes, " + - " typetimestamp, " + - " typeansi, " + - " typeboolean, " + - " typedecimal, " + - " typedouble, " + - " typefloat, " + - " typeinet, " + - " typevarchar, " + - " typevarint, " + - " typetimeuuid, " + - " typelist, " + - " typemap, " + - " typeset " + - " FROM " + tableName; - - MaterializedResult result = execute(sql); - - int rowCount = result.getRowCount(); - assertEquals(rowCount, 9); - assertEquals(result.getTypes(), ImmutableList.of( - createUnboundedVarcharType(), - UUID, - INTEGER, - BIGINT, - VARBINARY, - TIMESTAMP_WITH_TIME_ZONE, - createUnboundedVarcharType(), - BOOLEAN, - DOUBLE, - DOUBLE, - REAL, - inetType, - createUnboundedVarcharType(), - createUnboundedVarcharType(), - UUID, - createUnboundedVarcharType(), - createUnboundedVarcharType(), - createUnboundedVarcharType())); - - List sortedRows = result.getMaterializedRows().stream() - .sorted(comparing(o -> o.getField(1).toString())) - .collect(toList()); - - for (int rowNumber = 1; rowNumber <= rowCount; rowNumber++) { - assertEquals(sortedRows.get(rowNumber - 1), new MaterializedRow(DEFAULT_PRECISION, - "key " + rowNumber, - java.util.UUID.fromString(format("00000000-0000-0000-0000-%012d", rowNumber)), - rowNumber, - rowNumber + 1000L, - Bytes.fromBytes(Ints.toByteArray(rowNumber)), - TIMESTAMP_VALUE, - "ansi " + rowNumber, - rowNumber % 2 == 0, - Math.pow(2, rowNumber), - Math.pow(4, rowNumber), - (float) Math.pow(8, rowNumber), - "127.0.0.1", - "varchar " + rowNumber, - BigInteger.TEN.pow(rowNumber).toString(), - java.util.UUID.fromString(format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber)), - format("[\"list-value-1%1$d\",\"list-value-2%1$d\"]", rowNumber), - 
format("{%d:%d,%d:%d}", rowNumber, rowNumber + 1L, rowNumber + 2, rowNumber + 3L), - "[false,true]")); - } - } - - private MaterializedResult execute(String sql) - { - return getQueryRunner().execute(SESSION, sql); - } - - private TestCassandraTable testTable(String namePrefix, List columnDefinitions, List rowsToInsert) - { - return new TestCassandraTable(session::execute, server, KEYSPACE, namePrefix, columnDefinitions, rowsToInsert); - } } diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraLatestConnectorSmokeTest.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraLatestConnectorSmokeTest.java index 2109ce2cc262..6166f96b0a79 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraLatestConnectorSmokeTest.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraLatestConnectorSmokeTest.java @@ -28,7 +28,7 @@ public class TestCassandraLatestConnectorSmokeTest protected QueryRunner createQueryRunner() throws Exception { - CassandraServer server = closeAfterClass(new CassandraServer("3.11.10")); + TestingCassandraServer server = closeAfterClass(new TestingCassandraServer("3.11.10")); CassandraSession session = server.getSession(); createTestTables(session, KEYSPACE, Timestamp.from(TIMESTAMP_VALUE.toInstant())); return createCassandraQueryRunner(server, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraProtocolVersionV3ConnectorSmokeTest.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraProtocolVersionV3ConnectorSmokeTest.java index 7dd1953b3ab7..a6ce5b2e62fb 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraProtocolVersionV3ConnectorSmokeTest.java +++ 
b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraProtocolVersionV3ConnectorSmokeTest.java @@ -31,7 +31,7 @@ public class TestCassandraProtocolVersionV3ConnectorSmokeTest protected QueryRunner createQueryRunner() throws Exception { - CassandraServer server = closeAfterClass(new CassandraServer()); + CassandraServer server = closeAfterClass(new TestingCassandraServer()); CassandraSession session = server.getSession(); createTestTables(session, KEYSPACE, Timestamp.from(TIMESTAMP_VALUE.toInstant())); return createCassandraQueryRunner(server, ImmutableMap.of(), ImmutableMap.of("cassandra.protocol-version", "V3"), REQUIRED_TPCH_TABLES); diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTokenSplitManager.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTokenSplitManager.java index 598b8d00ff89..145da36ab364 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTokenSplitManager.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTokenSplitManager.java @@ -32,7 +32,7 @@ public class TestCassandraTokenSplitManager private static final String KEYSPACE = "test_cassandra_token_split_manager_keyspace"; private static final int PARTITION_COUNT = 1000; - private CassandraServer server; + private TestingCassandraServer server; private CassandraSession session; private CassandraTokenSplitManager splitManager; @@ -40,7 +40,7 @@ public class TestCassandraTokenSplitManager public void setUp() throws Exception { - server = new CassandraServer(); + server = new TestingCassandraServer(); session = server.getSession(); createKeyspace(session, KEYSPACE); splitManager = new CassandraTokenSplitManager(session, SPLIT_SIZE, Optional.empty()); diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTypeMapping.java 
b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTypeMapping.java index 480233200942..413f1d123d24 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTypeMapping.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraTypeMapping.java @@ -135,7 +135,7 @@ private static void checkIsDoubled(ZoneId zone, LocalDateTime dateTime) protected QueryRunner createQueryRunner() throws Exception { - server = closeAfterClass(new CassandraServer()); + server = closeAfterClass(new TestingCassandraServer()); session = server.getSession(); return createCassandraQueryRunner( server, diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestingCassandraServer.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestingCassandraServer.java new file mode 100644 index 000000000000..a59cde022eca --- /dev/null +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestingCassandraServer.java @@ -0,0 +1,195 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.cassandra; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.CqlSessionBuilder; +import com.datastax.oss.driver.api.core.ProtocolVersion; +import com.datastax.oss.driver.api.core.config.DriverConfigLoader; +import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.google.common.io.Resources; +import io.airlift.json.JsonCodec; +import io.airlift.log.Logger; +import io.airlift.units.Duration; +import org.testcontainers.containers.GenericContainer; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.file.Path; +import java.util.List; +import java.util.concurrent.TimeoutException; + +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.CONTROL_CONNECTION_AGREEMENT_TIMEOUT; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.PROTOCOL_VERSION; +import static com.datastax.oss.driver.api.core.config.DefaultDriverOption.REQUEST_TIMEOUT; +import static com.google.common.io.Files.write; +import static com.google.common.io.Resources.getResource; +import static java.lang.String.format; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.nio.file.Files.createDirectory; +import static java.nio.file.Files.createTempDirectory; +import static java.util.Objects.requireNonNull; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.testcontainers.utility.MountableFile.forHostPath; +import static org.testng.Assert.assertEquals; + +public class TestingCassandraServer + implements CassandraServer +{ + private 
static Logger log = Logger.get(TestingCassandraServer.class); + + private static final int PORT = 9142; + + private static final Duration REFRESH_SIZE_ESTIMATES_TIMEOUT = new Duration(1, MINUTES); + + private final GenericContainer dockerContainer; + private final CassandraSession session; + + public TestingCassandraServer() + throws Exception + { + this("2.2"); + } + + public TestingCassandraServer(String cassandraVersion) + throws Exception + { + log.info("Starting cassandra..."); + + this.dockerContainer = new GenericContainer<>("cassandra:" + cassandraVersion) + .withExposedPorts(PORT) + .withCopyFileToContainer(forHostPath(prepareCassandraYaml()), "/etc/cassandra/cassandra.yaml"); + this.dockerContainer.start(); + + ProgrammaticDriverConfigLoaderBuilder driverConfigLoaderBuilder = DriverConfigLoader.programmaticBuilder(); + driverConfigLoaderBuilder.withDuration(REQUEST_TIMEOUT, java.time.Duration.ofSeconds(12)); + driverConfigLoaderBuilder.withString(PROTOCOL_VERSION, ProtocolVersion.V3.name()); + driverConfigLoaderBuilder.withDuration(CONTROL_CONNECTION_AGREEMENT_TIMEOUT, java.time.Duration.ofSeconds(30)); + // allow the retrieval of metadata for the system keyspaces + driverConfigLoaderBuilder.withStringList(METADATA_SCHEMA_REFRESHED_KEYSPACES, List.of()); + + CqlSessionBuilder cqlSessionBuilder = CqlSession.builder() + .withApplicationName("TestCluster") + .addContactPoint(new InetSocketAddress(this.dockerContainer.getContainerIpAddress(), this.dockerContainer.getMappedPort(PORT))) + .withLocalDatacenter("datacenter1") + .withConfigLoader(driverConfigLoaderBuilder.build()); + + CassandraSession session = new CassandraSession( + JsonCodec.listJsonCodec(ExtraColumnMetadata.class), + cqlSessionBuilder::build, + new Duration(1, MINUTES)); + + try { + checkConnectivity(session); + } + catch (RuntimeException e) { + session.close(); + this.dockerContainer.stop(); + throw e; + } + + this.session = session; + } + + private static String prepareCassandraYaml() + 
throws IOException + { + String original = Resources.toString(getResource("cu-cassandra.yaml"), UTF_8); + + Path tmpDirPath = createTempDirectory(null); + Path dataDir = tmpDirPath.resolve("data"); + createDirectory(dataDir); + + String modified = original.replaceAll("\\$\\{data_directory\\}", dataDir.toAbsolutePath().toString()); + + File yamlFile = tmpDirPath.resolve("cu-cassandra.yaml").toFile(); + yamlFile.deleteOnExit(); + write(modified, yamlFile, UTF_8); + + return yamlFile.getAbsolutePath(); + } + + @Override + public CassandraSession getSession() + { + return requireNonNull(session, "session is null"); + } + + @Override + public String getHost() + { + return dockerContainer.getContainerIpAddress(); + } + + @Override + public int getPort() + { + return dockerContainer.getMappedPort(PORT); + } + + private static void checkConnectivity(CassandraSession session) + { + ResultSet result = session.execute("SELECT release_version FROM system.local"); + List rows = result.all(); + assertEquals(rows.size(), 1); + String version = rows.get(0).getString(0); + log.info("Cassandra version: %s", version); + } + + @Override + public void refreshSizeEstimates(String keyspace, String table) + throws Exception + { + long deadline = System.nanoTime() + REFRESH_SIZE_ESTIMATES_TIMEOUT.roundTo(NANOSECONDS); + while (System.nanoTime() - deadline < 0) { + flushTable(keyspace, table); + refreshSizeEstimates(); + List sizeEstimates = getSession().getSizeEstimates(keyspace, table); + if (!sizeEstimates.isEmpty()) { + log.info("Size estimates for the table %s.%s have been refreshed successfully: %s", keyspace, table, sizeEstimates); + return; + } + log.info("Size estimates haven't been refreshed as expected. 
Retrying ..."); + SECONDS.sleep(1); + } + throw new TimeoutException(format("Attempting to refresh size estimates for table %s.%s has timed out after %s", keyspace, table, REFRESH_SIZE_ESTIMATES_TIMEOUT)); + } + + private void flushTable(String keyspace, String table) + throws Exception + { + dockerContainer.execInContainer("nodetool", "flush", keyspace, table); + } + + private void refreshSizeEstimates() + throws Exception + { + dockerContainer.execInContainer("nodetool", "refreshsizeestimates"); + } + + @Override + public void close() + { + if (session != null) { + session.close(); + } + dockerContainer.close(); + } +} diff --git a/plugin/trino-scylla/pom.xml b/plugin/trino-scylla/pom.xml new file mode 100644 index 000000000000..c617c7288825 --- /dev/null +++ b/plugin/trino-scylla/pom.xml @@ -0,0 +1,203 @@ + + + 4.0.0 + + io.trino + trino-root + 383-SNAPSHOT + ../../pom.xml + + + trino-scylla + Trino - Scylla Connector + trino-plugin + + + ${project.parent.basedir} + + + + + + org.ow2.asm + asm-analysis + 9.2 + + + + org.ow2.asm + asm-tree + 9.2 + + + + org.ow2.asm + asm-util + 9.2 + + + + + + + io.trino + trino-cassandra + ${project.version} + + + com.datastax.oss + java-driver-core + + + + + + + com.google.guava + guava + + + + io.airlift + json + runtime + + + + io.airlift + log + runtime + + + + io.airlift + log-manager + runtime + + + + io.airlift + units + runtime + + + + com.scylladb + java-driver-core + 4.13.0.0 + runtime + + + + + io.trino + trino-spi + provided + + + + io.airlift + slice + provided + + + + com.fasterxml.jackson.core + jackson-annotations + provided + + + + org.openjdk.jol + jol-core + provided + + + + + io.trino + trino-cassandra + test-jar + ${project.version} + test + + + + io.trino + trino-main + test-jar + test + + + + io.trino + trino-main + test + + + + io.trino + trino-testing + test + + + + io.trino + trino-tpch + test + + + + io.trino.tpch + tpch + test + + + + io.airlift + testing + test + + + + org.apache.thrift + libthrift + 
test + + + + org.assertj + assertj-core + test + + + + org.testcontainers + testcontainers + test + + + + org.testng + testng + test + + + + + + + org.basepom.maven + duplicate-finder-maven-plugin + + + + com.datastax.oss + java-driver-core + + + + + + + diff --git a/plugin/trino-scylla/src/main/java/io/trino/plugin/scylla/ScyllaConnectorFactory.java b/plugin/trino-scylla/src/main/java/io/trino/plugin/scylla/ScyllaConnectorFactory.java new file mode 100644 index 000000000000..9260ca48e65b --- /dev/null +++ b/plugin/trino-scylla/src/main/java/io/trino/plugin/scylla/ScyllaConnectorFactory.java @@ -0,0 +1,26 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.scylla; + +import io.trino.plugin.cassandra.CassandraConnectorFactory; + +public class ScyllaConnectorFactory + extends CassandraConnectorFactory +{ + @Override + public String getName() + { + return "scylla"; + } +} diff --git a/plugin/trino-scylla/src/main/java/io/trino/plugin/scylla/ScyllaPlugin.java b/plugin/trino-scylla/src/main/java/io/trino/plugin/scylla/ScyllaPlugin.java new file mode 100644 index 000000000000..6c9b549123ca --- /dev/null +++ b/plugin/trino-scylla/src/main/java/io/trino/plugin/scylla/ScyllaPlugin.java @@ -0,0 +1,28 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.scylla; + +import com.google.common.collect.ImmutableList; +import io.trino.spi.Plugin; +import io.trino.spi.connector.ConnectorFactory; + +public class ScyllaPlugin + implements Plugin +{ + @Override + public Iterable getConnectorFactories() + { + return ImmutableList.of(new ScyllaConnectorFactory()); + } +} diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/ScyllaQueryRunner.java b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/ScyllaQueryRunner.java similarity index 92% rename from plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/ScyllaQueryRunner.java rename to plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/ScyllaQueryRunner.java index 8091bcd1ea1f..e90f512c2cbe 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/ScyllaQueryRunner.java +++ b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/ScyllaQueryRunner.java @@ -11,12 +11,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package io.trino.plugin.cassandra; +package io.trino.plugin.scylla; import com.google.common.collect.ImmutableMap; import io.airlift.log.Logger; import io.airlift.log.Logging; import io.trino.Session; +import io.trino.plugin.cassandra.CassandraServer; import io.trino.plugin.tpch.TpchPlugin; import io.trino.testing.DistributedQueryRunner; import io.trino.tpch.TpchTable; @@ -35,7 +36,7 @@ public final class ScyllaQueryRunner private ScyllaQueryRunner() {} public static DistributedQueryRunner createScyllaQueryRunner( - TestingScyllaServer server, + CassandraServer server, Map extraProperties, Map connectorProperties, Iterable> tables) @@ -56,8 +57,8 @@ public static DistributedQueryRunner createScyllaQueryRunner( connectorProperties.putIfAbsent("cassandra.load-policy.use-dc-aware", "true"); connectorProperties.putIfAbsent("cassandra.load-policy.dc-aware.local-dc", "datacenter1"); - queryRunner.installPlugin(new CassandraPlugin()); - queryRunner.createCatalog("cassandra", "cassandra", connectorProperties); + queryRunner.installPlugin(new ScyllaPlugin()); + queryRunner.createCatalog("scylla", "scylla", connectorProperties); createKeyspace(server.getSession(), "tpch"); copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession("tpch"), tables); @@ -75,7 +76,7 @@ public static DistributedQueryRunner createScyllaQueryRunner( public static Session createSession(String schema) { return testSessionBuilder() - .setCatalog("cassandra") + .setCatalog("scylla") .setSchema(schema) .build(); } diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestScyllaConnectorSmokeTest.java b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaConnectorSmokeTest.java similarity index 80% rename from plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestScyllaConnectorSmokeTest.java rename to plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaConnectorSmokeTest.java index 3c9c992d1100..8d29cff27edd 100644 --- 
a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestScyllaConnectorSmokeTest.java +++ b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaConnectorSmokeTest.java @@ -11,15 +11,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package io.trino.plugin.cassandra; +package io.trino.plugin.scylla; import com.google.common.collect.ImmutableMap; +import io.trino.plugin.cassandra.BaseCassandraConnectorSmokeTest; +import io.trino.plugin.cassandra.CassandraSession; import io.trino.testing.QueryRunner; import java.sql.Timestamp; import static io.trino.plugin.cassandra.CassandraTestingUtils.createTestTables; -import static io.trino.plugin.cassandra.ScyllaQueryRunner.createScyllaQueryRunner; +import static io.trino.plugin.scylla.ScyllaQueryRunner.createScyllaQueryRunner; +import static io.trino.plugin.scylla.TestingScyllaServer.V3_TAG; public class TestScyllaConnectorSmokeTest extends BaseCassandraConnectorSmokeTest @@ -28,7 +31,7 @@ public class TestScyllaConnectorSmokeTest protected QueryRunner createQueryRunner() throws Exception { - TestingScyllaServer server = closeAfterClass(new TestingScyllaServer("3.3.4")); + TestingScyllaServer server = closeAfterClass(new TestingScyllaServer(V3_TAG)); CassandraSession session = server.getSession(); createTestTables(session, KEYSPACE, Timestamp.from(TIMESTAMP_VALUE.toInstant())); return createScyllaQueryRunner(server, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); diff --git a/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaConnectorTest.java b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaConnectorTest.java new file mode 100644 index 000000000000..f4a010e97564 --- /dev/null +++ b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaConnectorTest.java @@ -0,0 +1,53 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.scylla; + +import com.google.common.collect.ImmutableMap; +import io.trino.Session; +import io.trino.plugin.cassandra.BaseCassandraConnectorTest; +import io.trino.testing.MaterializedResult; +import io.trino.testing.QueryRunner; + +import java.sql.Timestamp; + +import static io.trino.plugin.cassandra.CassandraTestingUtils.createTestTables; +import static io.trino.plugin.scylla.ScyllaQueryRunner.createScyllaQueryRunner; +import static io.trino.plugin.scylla.ScyllaQueryRunner.createSession; +import static io.trino.plugin.scylla.TestingScyllaServer.V3_TAG; + +public class TestScyllaConnectorTest + extends BaseCassandraConnectorTest +{ + protected static final Session SESSION = createSession(KEYSPACE); + + @Override + protected QueryRunner createQueryRunner() + throws Exception + { + server = closeAfterClass(new TestingScyllaServer(V3_TAG)); + session = server.getSession(); + createTestTables(session, KEYSPACE, Timestamp.from(TIMESTAMP_VALUE.toInstant())); + return createScyllaQueryRunner( + server, + ImmutableMap.of(), + ImmutableMap.of("cassandra.batch-size", "50"), // The default 100 causes 'Batch too large' error + REQUIRED_TPCH_TABLES); + } + + @Override + protected MaterializedResult execute(String sql) + { + return getQueryRunner().execute(SESSION, sql); + } +} diff --git a/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaLatestConnectorSmokeTest.java b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaLatestConnectorSmokeTest.java new file 
mode 100644 index 000000000000..a714b8d6fa98 --- /dev/null +++ b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestScyllaLatestConnectorSmokeTest.java @@ -0,0 +1,39 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.scylla; + +import com.google.common.collect.ImmutableMap; +import io.trino.plugin.cassandra.BaseCassandraConnectorSmokeTest; +import io.trino.plugin.cassandra.CassandraSession; +import io.trino.testing.QueryRunner; + +import java.sql.Timestamp; + +import static io.trino.plugin.cassandra.CassandraTestingUtils.createTestTables; +import static io.trino.plugin.scylla.ScyllaQueryRunner.createScyllaQueryRunner; +import static io.trino.plugin.scylla.TestingScyllaServer.V4_TAG; + +public class TestScyllaLatestConnectorSmokeTest + extends BaseCassandraConnectorSmokeTest +{ + @Override + protected QueryRunner createQueryRunner() + throws Exception + { + TestingScyllaServer server = closeAfterClass(new TestingScyllaServer(V4_TAG)); + CassandraSession session = server.getSession(); + createTestTables(session, KEYSPACE, Timestamp.from(TIMESTAMP_VALUE.toInstant())); + return createScyllaQueryRunner(server, ImmutableMap.of(), ImmutableMap.of(), REQUIRED_TPCH_TABLES); + } +} diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestingScyllaServer.java b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestingScyllaServer.java similarity index 92% rename from 
plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestingScyllaServer.java rename to plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestingScyllaServer.java index 73da7463bcec..570d97be8b5b 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestingScyllaServer.java +++ b/plugin/trino-scylla/src/test/java/io/trino/plugin/scylla/TestingScyllaServer.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package io.trino.plugin.cassandra; +package io.trino.plugin.scylla; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.CqlSessionBuilder; @@ -21,9 +21,12 @@ import io.airlift.json.JsonCodec; import io.airlift.log.Logger; import io.airlift.units.Duration; +import io.trino.plugin.cassandra.CassandraServer; +import io.trino.plugin.cassandra.CassandraSession; +import io.trino.plugin.cassandra.ExtraColumnMetadata; +import io.trino.plugin.cassandra.SizeEstimate; import org.testcontainers.containers.GenericContainer; -import java.io.Closeable; import java.net.InetSocketAddress; import java.util.List; import java.util.concurrent.TimeoutException; @@ -39,25 +42,25 @@ import static java.util.concurrent.TimeUnit.SECONDS; public class TestingScyllaServer - implements Closeable + implements CassandraServer { private static final Logger log = Logger.get(TestingScyllaServer.class); - private static final int PORT = 9042; + public static final String V4_TAG = "4.5.3"; + public static final String V3_TAG = "3.0.0"; + private static final int PORT = 9042; private static final Duration REFRESH_SIZE_ESTIMATES_TIMEOUT = new Duration(1, MINUTES); private final GenericContainer container; private final CassandraSession session; public TestingScyllaServer() - throws Exception { - this("2.2.0"); + this(V3_TAG); } public TestingScyllaServer(String version) - throws Exception { container = new GenericContainer<>("scylladb/scylla:" + 
version) .withCommand("--smp", "1") // Limit SMP to run in a machine having many cores https://github.com/scylladb/scylla/issues/5638 @@ -83,21 +86,25 @@ public TestingScyllaServer(String version) new Duration(1, MINUTES)); } + @Override public CassandraSession getSession() { return requireNonNull(session, "session is null"); } + @Override public String getHost() { return container.getContainerIpAddress(); } + @Override public int getPort() { return container.getMappedPort(PORT); } + @Override public void refreshSizeEstimates(String keyspace, String table) throws Exception { diff --git a/pom.xml b/pom.xml index 103e889bee88..864f8e83f991 100644 --- a/pom.xml +++ b/pom.xml @@ -147,6 +147,7 @@ plugin/trino-redis plugin/trino-redshift plugin/trino-resource-group-managers + plugin/trino-scylla plugin/trino-session-property-managers plugin/trino-singlestore plugin/trino-sqlserver @@ -476,6 +477,12 @@ ${project.version} + + io.trino + trino-scylla + ${project.version} + + io.trino trino-server diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java index f0ae2e528040..06eaa0adad61 100644 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeAllConnectors.java @@ -76,6 +76,7 @@ public void extendEnvironment(Environment.Builder builder) "raptor-legacy", "redis", "redshift", + "scylla", "sqlserver", "trino-thrift", "tpcds") diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeScylla.java 
b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeScylla.java new file mode 100644 index 000000000000..cc5d28896caf --- /dev/null +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvMultinodeScylla.java @@ -0,0 +1,75 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.tests.product.launcher.env.environment; + +import io.trino.tests.product.launcher.docker.DockerFiles; +import io.trino.tests.product.launcher.docker.DockerFiles.ResourceProvider; +import io.trino.tests.product.launcher.env.DockerContainer; +import io.trino.tests.product.launcher.env.Environment.Builder; +import io.trino.tests.product.launcher.env.EnvironmentProvider; +import io.trino.tests.product.launcher.env.common.StandardMultinode; +import io.trino.tests.product.launcher.env.common.TestsEnvironment; +import io.trino.tests.product.launcher.testcontainers.PortBinder; +import org.testcontainers.containers.startupcheck.IsRunningStartupCheckStrategy; + +import javax.inject.Inject; + +import java.time.Duration; + +import static io.trino.tests.product.launcher.docker.ContainerUtil.forSelectedPorts; +import static io.trino.tests.product.launcher.env.EnvironmentContainers.configureTempto; +import static java.util.Objects.requireNonNull; +import static org.testcontainers.utility.MountableFile.forHostPath; + +@TestsEnvironment +public class EnvMultinodeScylla + 
extends EnvironmentProvider +{ + public static final int SCYLLA_PORT = 9042; + + private final ResourceProvider configDir; + private final PortBinder portBinder; + + @Inject + public EnvMultinodeScylla(StandardMultinode standardMultinode, DockerFiles dockerFiles, PortBinder portBinder) + { + super(standardMultinode); + this.configDir = requireNonNull(dockerFiles, "dockerFiles is null").getDockerFilesHostDirectory("conf/environment/multinode-scylla/"); + this.portBinder = requireNonNull(portBinder, "portBinder is null"); + } + + @Override + public void extendEnvironment(Builder builder) + { + builder.addConnector("scylla", forHostPath(configDir.getPath("scylla.properties"))); + builder.addContainer(createScylla()); + configureTempto(builder, configDir); + } + + private DockerContainer createScylla() + { + DockerContainer container = new DockerContainer("scylladb/scylla:4.6.2", "scylla") + .withEnv("HEAP_NEWSIZE", "128M") + .withEnv("MAX_HEAP_SIZE", "512M") + // Limit SMP to run in a machine having many cores https://github.com/scylladb/scylla/issues/5638 + .withCommand("--smp", "1") + .withStartupCheckStrategy(new IsRunningStartupCheckStrategy()) + .waitingFor(forSelectedPorts(SCYLLA_PORT)) + .withStartupTimeout(Duration.ofMinutes(5)); + + portBinder.exposePort(container, SCYLLA_PORT); + + return container; + } +} diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java index 9adddc71b3b3..dfbf02c6dd02 100644 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/Suite6NonGeneric.java @@ -21,6 +21,7 @@ import io.trino.tests.product.launcher.env.environment.EnvMultinodeKafkaSsl; import 
io.trino.tests.product.launcher.env.environment.EnvMultinodePhoenix4; import io.trino.tests.product.launcher.env.environment.EnvMultinodePhoenix5; +import io.trino.tests.product.launcher.env.environment.EnvMultinodeScylla; import io.trino.tests.product.launcher.env.environment.EnvSinglenodeCassandra; import io.trino.tests.product.launcher.env.environment.EnvSinglenodeKerberosKmsHdfsImpersonation; import io.trino.tests.product.launcher.env.environment.EnvSinglenodeKerberosKmsHdfsNoImpersonation; @@ -50,6 +51,9 @@ public List getTestRuns(EnvironmentConfig config) testOnEnvironment(EnvSinglenodeCassandra.class) .withGroups("configured_features", "cassandra") .build(), + testOnEnvironment(EnvMultinodeScylla.class) + .withGroups("configured_features", "scylla") + .build(), testOnEnvironment(EnvMultinodeKafka.class) .withGroups("configured_features", "kafka") .build(), diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/scylla.properties b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/scylla.properties new file mode 100644 index 000000000000..48d4ab86786e --- /dev/null +++ b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-all/scylla.properties @@ -0,0 +1,3 @@ +connector.name=scylla +cassandra.contact-points=host1.invalid,host2.invalid +cassandra.load-policy.dc-aware.local-dc=datacenter1 diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-scylla/scylla.properties b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-scylla/scylla.properties new file mode 100644 index 000000000000..883f48a5452b --- /dev/null +++ 
b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-scylla/scylla.properties @@ -0,0 +1,5 @@ +connector.name=scylla +cassandra.contact-points=scylla +cassandra.allow-drop-table=true +cassandra.load-policy.use-dc-aware=true +cassandra.load-policy.dc-aware.local-dc=datacenter1 diff --git a/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-scylla/tempto-configuration.yaml b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-scylla/tempto-configuration.yaml new file mode 100644 index 000000000000..cefae6c56579 --- /dev/null +++ b/testing/trino-product-tests-launcher/src/main/resources/docker/presto-product-tests/conf/environment/multinode-scylla/tempto-configuration.yaml @@ -0,0 +1,3 @@ +databases: + presto: + jdbc_url: "jdbc:trino://${databases.presto.host}:${databases.presto.port}/scylla/test" diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/TestGroups.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/TestGroups.java index 66d2b24f962d..26513afdf67e 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/TestGroups.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/TestGroups.java @@ -53,6 +53,7 @@ public final class TestGroups public static final String HIVE_COERCION = "hive_coercion"; public static final String AZURE = "azure"; public static final String CASSANDRA = "cassandra"; + public static final String SCYLLA = "scylla"; public static final String SQL_SERVER = "sqlserver"; public static final String LDAP = "ldap"; public static final String LDAP_AND_FILE = "ldap_and_file"; diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/scylla/TestScylla.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/scylla/TestScylla.java new file mode 100644 
index 000000000000..3b55ade69535 --- /dev/null +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/scylla/TestScylla.java @@ -0,0 +1,71 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.tests.product.scylla; + +import com.datastax.oss.driver.api.core.CqlSession; +import io.trino.tempto.BeforeTestWithContext; +import io.trino.tempto.ProductTest; +import io.trino.tempto.configuration.Configuration; +import io.trino.tempto.query.QueryResult; +import org.intellij.lang.annotations.Language; +import org.testng.annotations.Test; + +import javax.inject.Inject; + +import java.net.InetSocketAddress; + +import static io.trino.tempto.assertions.QueryAssert.Row.row; +import static io.trino.tempto.assertions.QueryAssert.assertThat; +import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; +import static io.trino.tests.product.TestGroups.SCYLLA; +import static io.trino.tests.product.utils.QueryExecutors.onTrino; + +public class TestScylla + extends ProductTest +{ + @Inject + private Configuration configuration; + + @BeforeTestWithContext + public void setUp() + { + onScylla("DROP KEYSPACE IF EXISTS test"); + onScylla("CREATE KEYSPACE test WITH replication={'class':'SimpleStrategy', 'replication_factor':1}"); + } + + @Test(groups = {SCYLLA, PROFILE_SPECIFIC_TESTS}) + public void testCreateTableAsSelect() + { + onTrino().executeQuery("DROP TABLE IF EXISTS scylla.test.nation"); + QueryResult result = 
onTrino().executeQuery("CREATE TABLE scylla.test.nation AS SELECT * FROM tpch.tiny.nation"); + try { + assertThat(result).updatedRowsCountIsEqualTo(25); + assertThat(onTrino().executeQuery("SELECT COUNT(*) FROM scylla.test.nation")) + .containsOnly(row(25)); + } + finally { + onTrino().executeQuery("DROP TABLE scylla.test.nation"); + } + } + + private void onScylla(@Language("SQL") String query) + { + try (CqlSession session = CqlSession.builder() + .addContactPoint(new InetSocketAddress(configuration.getStringMandatory("databases.scylla.host"), configuration.getIntMandatory("databases.scylla.port"))) + .withLocalDatacenter(configuration.getStringMandatory("databases.scylla.local_datacenter")) + .build()) { + session.execute(query); + } + } +} diff --git a/testing/trino-product-tests/src/main/resources/tempto-configuration.yaml b/testing/trino-product-tests/src/main/resources/tempto-configuration.yaml index 13743718a4e8..4fd7a88d790f 100644 --- a/testing/trino-product-tests/src/main/resources/tempto-configuration.yaml +++ b/testing/trino-product-tests/src/main/resources/tempto-configuration.yaml @@ -133,6 +133,11 @@ databases: skip_create_schema: false table_manager_type: cassandra + scylla: + host: scylla + port: 9042 + local_datacenter: datacenter1 + sqlserver: jdbc_driver_class: com.microsoft.sqlserver.jdbc.SQLServerDriver jdbc_url: jdbc:sqlserver://sqlserver;encrypt=false diff --git a/testing/trino-server-dev/etc/catalog/scylla.properties b/testing/trino-server-dev/etc/catalog/scylla.properties new file mode 100644 index 000000000000..e264b15f0548 --- /dev/null +++ b/testing/trino-server-dev/etc/catalog/scylla.properties @@ -0,0 +1,5 @@ +connector.name=scylla +cassandra.contact-points=scylla +cassandra.load-policy.use-dc-aware=true +cassandra.load-policy.dc-aware.local-dc=datacenter1 +cassandra.allow-drop-table=true diff --git a/testing/trino-server-dev/etc/config.properties b/testing/trino-server-dev/etc/config.properties index ac207e125b69..86f32323633a 
100644 --- a/testing/trino-server-dev/etc/config.properties +++ b/testing/trino-server-dev/etc/config.properties @@ -34,6 +34,7 @@ plugin.bundles=\ ../../plugin/trino-delta-lake/pom.xml,\ ../../plugin/trino-blackhole/pom.xml,\ ../../plugin/trino-cassandra/pom.xml,\ + ../../plugin/trino-scylla/pom.xml,\ ../../plugin/trino-memory/pom.xml,\ ../../plugin/trino-jmx/pom.xml,\ ../../plugin/trino-raptor-legacy/pom.xml,\