diff --git a/plugin/trino-clickhouse/pom.xml b/plugin/trino-clickhouse/pom.xml
index d29125cb94a8..69bea01c8f22 100644
--- a/plugin/trino-clickhouse/pom.xml
+++ b/plugin/trino-clickhouse/pom.xml
@@ -48,10 +48,14 @@
log-manager
+
com.clickhouse
clickhouse-jdbc
- 0.3.2-patch1
+ 0.3.2-patch2
org.slf4j
@@ -65,10 +69,6 @@
org.projectlombok
lombok
-
- com.clickhouse
- clickhouse-http-client
-
org.robolectric
android-all
diff --git a/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClient.java b/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClient.java
index 783008905365..3f7bd1b7fae2 100644
--- a/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClient.java
+++ b/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClient.java
@@ -13,6 +13,8 @@
*/
package io.trino.plugin.clickhouse;
+import com.clickhouse.client.ClickHouseColumn;
+import com.clickhouse.client.ClickHouseDataType;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import io.trino.plugin.base.expression.AggregateFunctionRewriter;
@@ -55,8 +57,6 @@
import javax.inject.Inject;
import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.List;
@@ -69,6 +69,8 @@
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Strings.isNullOrEmpty;
import static com.google.common.base.Verify.verify;
+import static io.airlift.slice.Slices.utf8Slice;
+import static io.airlift.slice.Slices.wrappedBuffer;
import static io.trino.plugin.clickhouse.ClickHouseSessionProperties.isMapStringAsVarchar;
import static io.trino.plugin.clickhouse.ClickHouseTableProperties.SAMPLE_BY_PROPERTY;
import static io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW;
@@ -96,7 +98,6 @@
import static io.trino.plugin.jdbc.StandardColumnMappings.timestampColumnMappingUsingSqlTimestampWithRounding;
import static io.trino.plugin.jdbc.StandardColumnMappings.tinyintColumnMapping;
import static io.trino.plugin.jdbc.StandardColumnMappings.tinyintWriteFunction;
-import static io.trino.plugin.jdbc.StandardColumnMappings.varbinaryColumnMapping;
import static io.trino.plugin.jdbc.StandardColumnMappings.varbinaryWriteFunction;
import static io.trino.plugin.jdbc.StandardColumnMappings.varcharReadFunction;
import static io.trino.plugin.jdbc.StandardColumnMappings.varcharWriteFunction;
@@ -115,6 +116,7 @@
import static io.trino.spi.type.TinyintType.TINYINT;
import static io.trino.spi.type.UuidType.javaUuidToTrinoUuid;
import static io.trino.spi.type.UuidType.trinoUuidToJavaUuid;
+import static io.trino.spi.type.VarbinaryType.VARBINARY;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;
import static java.lang.Float.floatToRawIntBits;
import static java.lang.Math.max;
@@ -306,16 +308,9 @@ public void renameColumn(ConnectorSession session, JdbcTableHandle handle, JdbcC
}
@Override
- public ResultSet getTables(Connection connection, Optional schemaName, Optional tableName)
- throws SQLException
+ protected Optional> getTableTypes()
{
- // ClickHouse maps their "database" to SQL catalogs and does not have schemas
- DatabaseMetaData metadata = connection.getMetaData();
- return metadata.getTables(
- null,
- schemaName.orElse(null),
- escapeNamePattern(tableName, metadata.getSearchStringEscape()).orElse(null),
- new String[] {"TABLE", "VIEW"});
+ return Optional.empty();
}
@Override
@@ -366,12 +361,14 @@ public Optional toColumnMapping(ConnectorSession session, Connect
return mapping;
}
- switch (jdbcTypeName.replaceAll("\\(.*\\)$", "")) {
- case "IPv4":
- case "IPv6":
+ ClickHouseColumn column = ClickHouseColumn.of("", jdbcTypeName);
+ ClickHouseDataType columnDataType = column.getDataType();
+ switch (columnDataType) {
+ case IPv4:
+ case IPv6:
// TODO (https://github.com/trinodb/trino/issues/7098) map to Trino IPADDRESS
- case "Enum8":
- case "Enum16":
+ case Enum8:
+ case Enum16:
return Optional.of(ColumnMapping.sliceMapping(
createUnboundedVarcharType(),
varcharReadFunction(createUnboundedVarcharType()),
@@ -379,8 +376,8 @@ public Optional toColumnMapping(ConnectorSession session, Connect
// TODO (https://github.com/trinodb/trino/issues/7100) Currently pushdown would not work and may require a custom bind expression
DISABLE_PUSHDOWN));
- case "FixedString": // FixedString(n)
- case "String":
+ case FixedString: // FixedString(n)
+ case String:
if (isMapStringAsVarchar(session)) {
return Optional.of(ColumnMapping.sliceMapping(
createUnboundedVarcharType(),
@@ -389,8 +386,12 @@ public Optional toColumnMapping(ConnectorSession session, Connect
DISABLE_PUSHDOWN));
}
// TODO (https://github.com/trinodb/trino/issues/7100) test & enable predicate pushdown
- return Optional.of(varbinaryColumnMapping());
- case "UUID":
+ return Optional.of(ColumnMapping.sliceMapping(
+ VARBINARY,
+ (resultSet, columnIndex) -> wrappedBuffer(utf8Slice(resultSet.getString(columnIndex)).getBytes()),
+ varbinaryWriteFunction(),
+ DISABLE_PUSHDOWN));
+ case UUID:
return Optional.of(uuidColumnMapping());
}
@@ -441,7 +442,7 @@ public Optional toColumnMapping(ConnectorSession session, Connect
return Optional.of(dateColumnMappingUsingLocalDate());
case Types.TIMESTAMP:
- if (jdbcTypeName.equals("DateTime")) {
+ if (columnDataType == ClickHouseDataType.DateTime) {
verify(typeHandle.getRequiredDecimalDigits() == 0, "Expected 0 as timestamp precision, but got %s", typeHandle.getRequiredDecimalDigits());
return Optional.of(timestampColumnMapping(TIMESTAMP_SECONDS));
}
diff --git a/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClientModule.java b/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClientModule.java
index 1a538cda6dd2..bf86284c3ef0 100644
--- a/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClientModule.java
+++ b/plugin/trino-clickhouse/src/main/java/io/trino/plugin/clickhouse/ClickHouseClientModule.java
@@ -13,6 +13,7 @@
*/
package io.trino.plugin.clickhouse;
+import com.clickhouse.jdbc.ClickHouseDriver;
import com.google.inject.Binder;
import com.google.inject.Module;
import com.google.inject.Provides;
@@ -26,7 +27,6 @@
import io.trino.plugin.jdbc.ForBaseJdbc;
import io.trino.plugin.jdbc.JdbcClient;
import io.trino.plugin.jdbc.credential.CredentialProvider;
-import ru.yandex.clickhouse.ClickHouseDriver;
import static io.trino.plugin.jdbc.JdbcModule.bindSessionPropertiesProvider;
import static io.trino.plugin.jdbc.JdbcModule.bindTablePropertiesProvider;
diff --git a/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java b/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java
index 175429264494..776b6e50431e 100644
--- a/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java
+++ b/plugin/trino-clickhouse/src/test/java/io/trino/plugin/clickhouse/TestClickHouseConnectorTest.java
@@ -133,8 +133,10 @@ public void testDropColumn()
// the columns referenced by the order_by/partition_by properties cannot be dropped
assertUpdate("CREATE TABLE " + tableName + "(x int NOT NULL, y int, a int NOT NULL) WITH " +
"(engine = 'MergeTree', order_by = ARRAY['x'], partition_by = ARRAY['a'])");
- assertQueryFails("ALTER TABLE " + tableName + " DROP COLUMN x", "ClickHouse exception, code: 47,.*\\n");
- assertQueryFails("ALTER TABLE " + tableName + " DROP COLUMN a", "ClickHouse exception, code: 47,.*\\n");
+ assertQueryFails("ALTER TABLE " + tableName + " DROP COLUMN x",
+ "Code: 47,.* Missing columns: 'x' while processing query: 'x', required columns: 'x', source columns: 'a' 'y' .*\\n.*");
+ assertQueryFails("ALTER TABLE " + tableName + " DROP COLUMN a",
+ "Code: 47,.* Missing columns: 'a' while processing query: 'a', required columns: 'a', source columns: 'y' 'x' .*\\n.*");
}
@Override
@@ -296,9 +298,9 @@ public void testTableProperty()
assertUpdate("DROP TABLE " + tableName);
// Log engine DOES NOT support any property
- assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'Log', order_by=ARRAY['id'])", ".* doesn't support PARTITION_BY, PRIMARY_KEY, ORDER_BY or SAMPLE_BY clauses.*\\n");
- assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'Log', partition_by=ARRAY['id'])", ".* doesn't support PARTITION_BY, PRIMARY_KEY, ORDER_BY or SAMPLE_BY clauses.*\\n");
- assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'Log', sample_by='id')", ".* doesn't support PARTITION_BY, PRIMARY_KEY, ORDER_BY or SAMPLE_BY clauses.*\\n");
+ assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'Log', order_by=ARRAY['id'])", ".* doesn't support PARTITION_BY, PRIMARY_KEY, ORDER_BY or SAMPLE_BY clauses.*\\n.*");
+ assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'Log', partition_by=ARRAY['id'])", ".* doesn't support PARTITION_BY, PRIMARY_KEY, ORDER_BY or SAMPLE_BY clauses.*\\n.*");
+ assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'Log', sample_by='id')", ".* doesn't support PARTITION_BY, PRIMARY_KEY, ORDER_BY or SAMPLE_BY clauses.*\\n.*");
// optional properties
assertUpdate("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'MergeTree', order_by = ARRAY['id'])");
@@ -306,7 +308,7 @@ public void testTableProperty()
assertUpdate("DROP TABLE " + tableName);
// the column referred to by order_by must not be null
- assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'MergeTree', order_by = ARRAY['id', 'x'])", ".* Sorting key cannot contain nullable columns.*\\n");
+ assertQueryFails("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'MergeTree', order_by = ARRAY['id', 'x'])", ".* Sorting key cannot contain nullable columns.*\\n.*");
assertUpdate("CREATE TABLE " + tableName + " (id int NOT NULL, x VARCHAR) WITH (engine = 'MergeTree', order_by = ARRAY['id'], primary_key = ARRAY['id'])");
assertTrue(getQueryRunner().tableExists(getSession(), tableName));