diff --git a/docs/src/main/sphinx/connector/cassandra.rst b/docs/src/main/sphinx/connector/cassandra.rst
index 8d99f955b382..c6b77ab4e2e0 100644
--- a/docs/src/main/sphinx/connector/cassandra.rst
+++ b/docs/src/main/sphinx/connector/cassandra.rst
@@ -76,9 +76,11 @@ Property Name Description
This is a global setting used for all connections, regardless
of the user connected to Trino.
-``cassandra.protocol-version`` It is possible to override the protocol version for older Cassandra clusters.
- By default, the values from the highest protocol version the driver can use.
- Possible values include ``V2``, ``V3`` and ``V4``.
+``cassandra.protocol-version`` It is possible to override the protocol version for older Cassandra
+ clusters.
+ By default, the value corresponds to the default protocol version
+ used by the underlying Cassandra Java driver.
+ Possible values include ``V3``, ``V4``, ``V5``, and ``V6``.
================================================== ======================================================================
.. note::
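For illustration only, a catalog properties file that pins the protocol version explicitly could look like the following; the host names and chosen version are placeholders, not values taken from this change:

```properties
connector.name=cassandra
cassandra.contact-points=host1.example.com,host2.example.com
cassandra.protocol-version=V4
```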
diff --git a/plugin/trino-cassandra/pom.xml b/plugin/trino-cassandra/pom.xml
index 5ef3f425c6f7..8a2688190f3e 100644
--- a/plugin/trino-cassandra/pom.xml
+++ b/plugin/trino-cassandra/pom.xml
@@ -14,6 +14,8 @@
${project.parent.basedir}
+ 4.14.0
+ 1.5.1
@@ -22,11 +24,6 @@
trino-plugin-toolkit
-
- io.trino.cassandra
- cassandra-driver
-
-
io.airlift
bootstrap
@@ -49,12 +46,37 @@
io.airlift
- security
+ units
- io.airlift
- units
+ com.datastax.oss
+ java-driver-core
+ ${dep.cassandra.version}
+
+
+ org.ow2.asm
+ asm-analysis
+
+
+
+
+
+ com.datastax.oss
+ java-driver-query-builder
+ ${dep.cassandra.version}
+
+
+ com.github.spotbugs
+ spotbugs-annotations
+
+
+
+
+
+ com.datastax.oss
+ native-protocol
+ ${dep.native-protocol.version}
@@ -93,11 +115,6 @@
validation-api
-
- joda-time
- joda-time
-
-
org.weakref
jmxutils
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/BackoffRetryPolicy.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/BackoffRetryPolicy.java
index 574687b83e98..524191008eb7 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/BackoffRetryPolicy.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/BackoffRetryPolicy.java
@@ -13,63 +13,105 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.WriteType;
-import com.datastax.driver.core.exceptions.DriverException;
-import com.datastax.driver.core.policies.DefaultRetryPolicy;
-import com.datastax.driver.core.policies.RetryPolicy;
+import com.datastax.oss.driver.api.core.ConsistencyLevel;
+import com.datastax.oss.driver.api.core.context.DriverContext;
+import com.datastax.oss.driver.api.core.retry.RetryDecision;
+import com.datastax.oss.driver.api.core.retry.RetryPolicy;
+import com.datastax.oss.driver.api.core.servererrors.CoordinatorException;
+import com.datastax.oss.driver.api.core.servererrors.DefaultWriteType;
+import com.datastax.oss.driver.api.core.servererrors.WriteType;
+import com.datastax.oss.driver.api.core.session.Request;
+import io.airlift.log.Logger;
import java.util.concurrent.ThreadLocalRandom;
public class BackoffRetryPolicy
implements RetryPolicy
{
- public static final BackoffRetryPolicy INSTANCE = new BackoffRetryPolicy();
+ private static final Logger log = Logger.get(BackoffRetryPolicy.class);
- private BackoffRetryPolicy() {}
+ private final String logPrefix;
+
+ public BackoffRetryPolicy(DriverContext context, String profileName)
+ {
+ this.logPrefix = (context != null ? context.getSessionName() : null) + "|" + profileName;
+ }
+
+ @Override
+ public RetryDecision onReadTimeout(Request request, ConsistencyLevel consistencyLevel, int blockFor, int received, boolean dataPresent, int retryCount)
+ {
+ RetryDecision decision =
+ (retryCount == 0 && received >= blockFor && !dataPresent)
+ ? RetryDecision.RETRY_SAME
+ : RetryDecision.RETHROW;
+
+ if (decision == RetryDecision.RETRY_SAME) {
+ log.debug(
+ "[%s] Retrying on read timeout on same host (consistency: %s, required responses: %s, received responses: %s, data retrieved: %s, retries: %s)",
+ logPrefix,
+ consistencyLevel,
+ blockFor,
+ received,
+ false,
+ retryCount);
+ }
+
+ return decision;
+ }
+
+ @Override
+ public RetryDecision onWriteTimeout(Request request, ConsistencyLevel consistencyLevel, WriteType writeType, int blockFor, int received, int retryCount)
+ {
+ RetryDecision decision =
+ (retryCount == 0 && writeType == DefaultWriteType.BATCH_LOG)
+ ? RetryDecision.RETRY_SAME
+ : RetryDecision.RETHROW;
+
+ if (decision == RetryDecision.RETRY_SAME && log.isDebugEnabled()) {
+ log.debug(
+ "[%s] Retrying on write timeout on same host (consistency: %s, write type: %s, required acknowledgments: %s, received acknowledgments: %s, retries: %s)",
+ logPrefix,
+ consistencyLevel,
+ writeType,
+ blockFor,
+ received,
+ retryCount);
+ }
+ return decision;
+ }
@Override
- public RetryDecision onUnavailable(Statement statement, ConsistencyLevel consistencyLevel, int requiredReplica, int aliveReplica, int retries)
+ public RetryDecision onUnavailable(Request request, ConsistencyLevel consistencyLevel, int required, int alive, int retries)
{
if (retries >= 10) {
- return RetryDecision.rethrow();
+ return RetryDecision.RETHROW;
}
try {
int jitter = ThreadLocalRandom.current().nextInt(100);
int delay = (100 * (retries + 1)) + jitter;
Thread.sleep(delay);
- return RetryDecision.retry(consistencyLevel);
+ return RetryDecision.RETRY_SAME;
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
- return RetryDecision.rethrow();
+ return RetryDecision.RETHROW;
}
}
@Override
- public RetryDecision onReadTimeout(Statement statement, ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry)
+ public RetryDecision onRequestAborted(Request request, Throwable error, int retryCount)
{
- return DefaultRetryPolicy.INSTANCE.onReadTimeout(statement, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry);
+ return RetryDecision.RETHROW;
}
@Override
- public RetryDecision onWriteTimeout(Statement statement, ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry)
+ public RetryDecision onErrorResponse(Request request, CoordinatorException error, int retryCount)
{
- return DefaultRetryPolicy.INSTANCE.onWriteTimeout(statement, cl, writeType, requiredAcks, receivedAcks, nbRetry);
+ log.debug(error, "[%s] Retrying on node error on next host (retries: %s)", logPrefix, retryCount);
+ return RetryDecision.RETRY_NEXT;
}
- @Override
- public RetryDecision onRequestError(Statement statement, ConsistencyLevel cl, DriverException e, int nbRetry)
- {
- return RetryDecision.tryNextHost(cl);
- }
-
- @Override
- public void init(Cluster cluster) {}
-
@Override
public void close() {}
}
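Driver 4.x no longer accepts a retry-policy instance: the configured class is instantiated reflectively through a `(DriverContext, String profileName)` constructor, which is why the singleton above was replaced. A minimal standalone sketch (not part of this patch) of how such a policy is registered by class name:

```java
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

import io.trino.plugin.cassandra.BackoffRetryPolicy;

public class RetryPolicyWiringSketch
{
    public static void main(String[] args)
    {
        // The driver instantiates the configured class itself, so the policy is
        // referenced by name rather than passed as an instance as in driver 3.x.
        DriverConfigLoader loader = DriverConfigLoader.programmaticBuilder()
                .withString(DefaultDriverOption.RETRY_POLICY_CLASS, BackoffRetryPolicy.class.getName())
                .build();

        // Print the registered class name back out of the resulting config.
        System.out.println(loader.getInitialConfig()
                .getDefaultProfile()
                .getString(DefaultDriverOption.RETRY_POLICY_CLASS));
    }
}
```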
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientConfig.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientConfig.java
index 6a82d87f0e4e..2e0858bd51fb 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientConfig.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientConfig.java
@@ -13,9 +13,10 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.ConsistencyLevel;
-import com.datastax.driver.core.ProtocolVersion;
-import com.datastax.driver.core.SocketOptions;
+import com.datastax.oss.driver.api.core.ConsistencyLevel;
+import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
+import com.datastax.oss.driver.api.core.DefaultProtocolVersion;
+import com.datastax.oss.driver.api.core.ProtocolVersion;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import io.airlift.configuration.Config;
@@ -58,8 +59,8 @@ public class CassandraClientConfig
private boolean allowDropTable;
private String username;
private String password;
- private Duration clientReadTimeout = new Duration(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS, MILLISECONDS);
- private Duration clientConnectTimeout = new Duration(SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS, MILLISECONDS);
+ private Duration clientReadTimeout = new Duration(12_000, MILLISECONDS);
+ private Duration clientConnectTimeout = new Duration(5_000, MILLISECONDS);
private Integer clientSoLinger;
private RetryPolicyType retryPolicy = RetryPolicyType.DEFAULT;
private boolean useDCAware;
@@ -119,7 +120,7 @@ public ConsistencyLevel getConsistencyLevel()
}
@Config("cassandra.consistency-level")
- public CassandraClientConfig setConsistencyLevel(ConsistencyLevel level)
+ public CassandraClientConfig setConsistencyLevel(DefaultConsistencyLevel level)
{
this.consistencyLevel = level;
return this;
@@ -411,7 +412,7 @@ public ProtocolVersion getProtocolVersion()
}
@Config("cassandra.protocol-version")
- public CassandraClientConfig setProtocolVersion(ProtocolVersion version)
+ public CassandraClientConfig setProtocolVersion(DefaultProtocolVersion version)
{
this.protocolVersion = version;
return this;
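In driver 4.x, `ProtocolVersion` and `ConsistencyLevel` are interfaces rather than enums, so the setters above take the concrete `DefaultProtocolVersion`/`DefaultConsistencyLevel` enums, which can be bound from configuration strings. A rough sketch of that mapping, with example property values:

```java
import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
import com.datastax.oss.driver.api.core.DefaultProtocolVersion;

public class ConfigEnumSketch
{
    public static void main(String[] args)
    {
        // Roughly what enum binding does with "cassandra.protocol-version=V4"
        // and "cassandra.consistency-level=ONE".
        DefaultProtocolVersion protocolVersion = DefaultProtocolVersion.valueOf("V4");
        DefaultConsistencyLevel consistencyLevel = DefaultConsistencyLevel.valueOf("ONE");
        System.out.println(protocolVersion.getCode() + " " + consistencyLevel.name());
    }
}
```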
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientModule.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientModule.java
index ce8709dc0146..f26e53f4d74f 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientModule.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClientModule.java
@@ -13,17 +13,12 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.JdkSSLOptions;
-import com.datastax.driver.core.QueryOptions;
-import com.datastax.driver.core.SocketOptions;
-import com.datastax.driver.core.policies.ConstantSpeculativeExecutionPolicy;
-import com.datastax.driver.core.policies.DCAwareRoundRobinPolicy;
-import com.datastax.driver.core.policies.ExponentialReconnectionPolicy;
-import com.datastax.driver.core.policies.LoadBalancingPolicy;
-import com.datastax.driver.core.policies.RoundRobinPolicy;
-import com.datastax.driver.core.policies.TokenAwarePolicy;
-import com.datastax.driver.core.policies.WhiteListPolicy;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.CqlSessionBuilder;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
+import com.datastax.oss.driver.api.core.config.ProgrammaticDriverConfigLoaderBuilder;
+import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.deser.std.FromStringDeserializer;
import com.google.inject.Binder;
@@ -31,7 +26,6 @@
import com.google.inject.Provides;
import com.google.inject.Scopes;
import io.airlift.json.JsonCodec;
-import io.airlift.security.pem.PemReader;
import io.trino.spi.TrinoException;
import io.trino.spi.type.Type;
import io.trino.spi.type.TypeId;
@@ -40,20 +34,14 @@
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.net.ssl.SSLContext;
-import javax.security.auth.x500.X500Principal;
import java.io.File;
-import java.io.FileInputStream;
import java.io.IOException;
-import java.io.InputStream;
+import java.net.InetAddress;
import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
import java.security.GeneralSecurityException;
-import java.security.KeyStore;
-import java.security.cert.Certificate;
-import java.security.cert.CertificateExpiredException;
-import java.security.cert.CertificateNotYetValidException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
+import java.time.Duration;
import java.util.List;
import java.util.Optional;
@@ -64,7 +52,6 @@
import static io.trino.plugin.base.ssl.SslUtils.createSSLContext;
import static io.trino.plugin.cassandra.CassandraErrorCode.CASSANDRA_SSL_INITIALIZATION_FAILURE;
import static java.lang.Math.toIntExact;
-import static java.util.Collections.list;
import static java.util.Objects.requireNonNull;
public class CassandraClientModule
@@ -123,80 +110,70 @@ public static CassandraSession createCassandraSession(CassandraClientConfig conf
requireNonNull(config, "config is null");
requireNonNull(extraColumnMetadataCodec, "extraColumnMetadataCodec is null");
- Cluster.Builder clusterBuilder = Cluster.builder();
+ CqlSessionBuilder cqlSessionBuilder = CqlSession.builder();
+ ProgrammaticDriverConfigLoaderBuilder driverConfigLoaderBuilder = DriverConfigLoader.programmaticBuilder();
+ // allow the retrieval of metadata for the system keyspaces
+ driverConfigLoaderBuilder.withStringList(DefaultDriverOption.METADATA_SCHEMA_REFRESHED_KEYSPACES, List.of());
+
if (config.getProtocolVersion() != null) {
- clusterBuilder.withProtocolVersion(config.getProtocolVersion());
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.PROTOCOL_VERSION, config.getProtocolVersion().name());
}
List contactPoints = requireNonNull(config.getContactPoints(), "contactPoints is null");
checkArgument(!contactPoints.isEmpty(), "empty contactPoints");
- clusterBuilder.withPort(config.getNativeProtocolPort());
- clusterBuilder.withReconnectionPolicy(new ExponentialReconnectionPolicy(500, 10000));
- clusterBuilder.withRetryPolicy(config.getRetryPolicy().getPolicy());
- LoadBalancingPolicy loadPolicy = new RoundRobinPolicy();
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.RECONNECTION_POLICY_CLASS, com.datastax.oss.driver.internal.core.connection.ExponentialReconnectionPolicy.class.getName());
+ driverConfigLoaderBuilder.withDuration(DefaultDriverOption.RECONNECTION_BASE_DELAY, Duration.ofMillis(500));
+ driverConfigLoaderBuilder.withDuration(DefaultDriverOption.RECONNECTION_MAX_DELAY, Duration.ofMillis(10_000));
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.RETRY_POLICY_CLASS, config.getRetryPolicy().getPolicyClass().getName());
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.LOAD_BALANCING_POLICY_CLASS, DefaultLoadBalancingPolicy.class.getName());
if (config.isUseDCAware()) {
requireNonNull(config.getDcAwareLocalDC(), "DCAwarePolicy localDC is null");
- DCAwareRoundRobinPolicy.Builder builder = DCAwareRoundRobinPolicy.builder()
- .withLocalDc(config.getDcAwareLocalDC());
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.LOAD_BALANCING_LOCAL_DATACENTER, config.getDcAwareLocalDC());
+
if (config.getDcAwareUsedHostsPerRemoteDc() > 0) {
- builder.withUsedHostsPerRemoteDc(config.getDcAwareUsedHostsPerRemoteDc());
+ driverConfigLoaderBuilder.withInt(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_MAX_NODES_PER_REMOTE_DC, config.getDcAwareUsedHostsPerRemoteDc());
if (config.isDcAwareAllowRemoteDCsForLocal()) {
- builder.allowRemoteDCsForLocalConsistencyLevel();
+ driverConfigLoaderBuilder.withBoolean(DefaultDriverOption.LOAD_BALANCING_DC_FAILOVER_ALLOW_FOR_LOCAL_CONSISTENCY_LEVELS, true);
}
}
- loadPolicy = builder.build();
- }
-
- if (config.isUseTokenAware()) {
- loadPolicy = new TokenAwarePolicy(loadPolicy, config.isTokenAwareShuffleReplicas());
- }
-
- if (!config.getAllowedAddresses().isEmpty()) {
- checkArgument(!config.getAllowedAddresses().isEmpty(), "empty AllowListAddresses");
- List allowList = new ArrayList<>();
- for (String point : config.getAllowedAddresses()) {
- allowList.add(new InetSocketAddress(point, config.getNativeProtocolPort()));
- }
- loadPolicy = new WhiteListPolicy(loadPolicy, allowList);
}
- clusterBuilder.withLoadBalancingPolicy(loadPolicy);
-
- SocketOptions socketOptions = new SocketOptions();
- socketOptions.setReadTimeoutMillis(toIntExact(config.getClientReadTimeout().toMillis()));
- socketOptions.setConnectTimeoutMillis(toIntExact(config.getClientConnectTimeout().toMillis()));
+ driverConfigLoaderBuilder.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofMillis(toIntExact(config.getClientReadTimeout().toMillis())));
+ driverConfigLoaderBuilder.withDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT, Duration.ofMillis(toIntExact(config.getClientConnectTimeout().toMillis())));
if (config.getClientSoLinger() != null) {
- socketOptions.setSoLinger(config.getClientSoLinger());
+ driverConfigLoaderBuilder.withInt(DefaultDriverOption.SOCKET_LINGER_INTERVAL, config.getClientSoLinger());
}
if (config.isTlsEnabled()) {
buildSslContext(config.getKeystorePath(), config.getKeystorePassword(), config.getTruststorePath(), config.getTruststorePassword())
- .ifPresent(context -> clusterBuilder.withSSL(JdkSSLOptions.builder().withSSLContext(context).build()));
+ .ifPresent(cqlSessionBuilder::withSslContext);
}
- clusterBuilder.withSocketOptions(socketOptions);
if (config.getUsername() != null && config.getPassword() != null) {
- clusterBuilder.withCredentials(config.getUsername(), config.getPassword());
+ cqlSessionBuilder.withAuthCredentials(config.getUsername(), config.getPassword());
}
- QueryOptions options = new QueryOptions();
- options.setFetchSize(config.getFetchSize());
- options.setConsistencyLevel(config.getConsistencyLevel());
- clusterBuilder.withQueryOptions(options);
+ driverConfigLoaderBuilder.withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, config.getFetchSize());
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.REQUEST_CONSISTENCY, config.getConsistencyLevel().name());
if (config.getSpeculativeExecutionLimit().isPresent()) {
- clusterBuilder.withSpeculativeExecutionPolicy(new ConstantSpeculativeExecutionPolicy(
- config.getSpeculativeExecutionDelay().toMillis(), // delay before a new execution is launched
- config.getSpeculativeExecutionLimit().get())); // maximum number of executions
+ driverConfigLoaderBuilder.withString(DefaultDriverOption.SPECULATIVE_EXECUTION_POLICY_CLASS, com.datastax.oss.driver.internal.core.specex.ConstantSpeculativeExecutionPolicy.class.getName());
+ // maximum number of executions
+ driverConfigLoaderBuilder.withInt(DefaultDriverOption.SPECULATIVE_EXECUTION_MAX, config.getSpeculativeExecutionLimit().get());
+ // delay before a new execution is launched
+ driverConfigLoaderBuilder.withDuration(DefaultDriverOption.SPECULATIVE_EXECUTION_DELAY, Duration.ofMillis(config.getSpeculativeExecutionDelay().toMillis()));
}
+ cqlSessionBuilder.withConfigLoader(driverConfigLoaderBuilder.build());
+
return new CassandraSession(
extraColumnMetadataCodec,
- new ReopeningCluster(() -> {
- contactPoints.forEach(clusterBuilder::addContactPoint);
- return clusterBuilder.build();
- }),
+ () -> {
+ contactPoints.forEach(contactPoint -> cqlSessionBuilder.addContactPoint(
+ createInetSocketAddress(contactPoint, config.getNativeProtocolPort())));
+ return cqlSessionBuilder.build();
+ },
config.getNoHostAvailableRetryTimeout());
}
@@ -218,52 +195,13 @@ private static Optional buildSslContext(
}
}
- private static KeyStore loadTrustStore(File trustStorePath, Optional trustStorePassword)
- throws IOException, GeneralSecurityException
+ private static InetSocketAddress createInetSocketAddress(String contactPoint, int port)
{
- KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
try {
- // attempt to read the trust store as a PEM file
- List certificateChain = PemReader.readCertificateChain(trustStorePath);
- if (!certificateChain.isEmpty()) {
- trustStore.load(null, null);
- for (X509Certificate certificate : certificateChain) {
- X500Principal principal = certificate.getSubjectX500Principal();
- trustStore.setCertificateEntry(principal.getName(), certificate);
- }
- return trustStore;
- }
- }
- catch (IOException | GeneralSecurityException ignored) {
+ return new InetSocketAddress(InetAddress.getByName(contactPoint), port);
}
-
- try (InputStream in = new FileInputStream(trustStorePath)) {
- trustStore.load(in, trustStorePassword.map(String::toCharArray).orElse(null));
- }
- return trustStore;
- }
-
- private static void validateCertificates(KeyStore keyStore)
- throws GeneralSecurityException
- {
- for (String alias : list(keyStore.aliases())) {
- if (!keyStore.isKeyEntry(alias)) {
- continue;
- }
- Certificate certificate = keyStore.getCertificate(alias);
- if (!(certificate instanceof X509Certificate)) {
- continue;
- }
-
- try {
- ((X509Certificate) certificate).checkValidity();
- }
- catch (CertificateExpiredException e) {
- throw new CertificateExpiredException("KeyStore certificate is expired: " + e.getMessage());
- }
- catch (CertificateNotYetValidException e) {
- throw new CertificateNotYetValidException("KeyStore certificate is not yet valid: " + e.getMessage());
- }
+ catch (UnknownHostException e) {
+ throw new IllegalArgumentException("Failed to add contact point: " + contactPoint, e);
}
}
}
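For orientation, a stripped-down example of the driver 4.x session-construction pattern used in this module; the contact point, datacenter name, and option values are placeholders:

```java
import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;

import java.net.InetSocketAddress;
import java.time.Duration;

public class SessionBuilderSketch
{
    public static void main(String[] args)
    {
        // Settings that lived on Cluster.Builder/SocketOptions/QueryOptions in driver 3.x
        // are now plain options supplied through a DriverConfigLoader.
        DriverConfigLoader configLoader = DriverConfigLoader.programmaticBuilder()
                .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(12))
                .withInt(DefaultDriverOption.REQUEST_PAGE_SIZE, 5_000)
                .build();

        try (CqlSession session = CqlSession.builder()
                .addContactPoint(new InetSocketAddress("127.0.0.1", 9042))
                .withLocalDatacenter("datacenter1") // needed by the default load balancing policy
                .withConfigLoader(configLoader)
                .build()) {
            System.out.println(session.execute("SELECT release_version FROM system.local").one().getString(0));
        }
    }
}
```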
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClusteringPredicatesExtractor.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClusteringPredicatesExtractor.java
index fe17c8a9d45f..6c062539c0fa 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClusteringPredicatesExtractor.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraClusteringPredicatesExtractor.java
@@ -13,7 +13,7 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.VersionNumber;
+import com.datastax.oss.driver.api.core.Version;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
@@ -36,7 +36,7 @@ public class CassandraClusteringPredicatesExtractor
private final ClusteringPushDownResult clusteringPushDownResult;
private final TupleDomain predicates;
- public CassandraClusteringPredicatesExtractor(List clusteringColumns, TupleDomain predicates, VersionNumber cassandraVersion)
+ public CassandraClusteringPredicatesExtractor(List clusteringColumns, TupleDomain predicates, Version cassandraVersion)
{
this.predicates = requireNonNull(predicates, "predicates is null");
this.clusteringPushDownResult = getClusteringKeysSet(clusteringColumns, predicates, requireNonNull(cassandraVersion, "cassandraVersion is null"));
@@ -52,7 +52,7 @@ public TupleDomain getUnenforcedConstraints()
return predicates.filter(((columnHandle, domain) -> !clusteringPushDownResult.hasBeenFullyPushed(columnHandle)));
}
- private static ClusteringPushDownResult getClusteringKeysSet(List clusteringColumns, TupleDomain predicates, VersionNumber cassandraVersion)
+ private static ClusteringPushDownResult getClusteringKeysSet(List clusteringColumns, TupleDomain predicates, Version cassandraVersion)
{
ImmutableSet.Builder fullyPushedColumnPredicates = ImmutableSet.builder();
ImmutableList.Builder clusteringColumnSql = ImmutableList.builder();
@@ -127,9 +127,9 @@ private static ClusteringPushDownResult getClusteringKeysSet(List clusteringColumns, VersionNumber cassandraVersion, int currentlyProcessedClusteringColumn)
+ private static boolean isInExpressionNotAllowed(List clusteringColumns, Version cassandraVersion, int currentlyProcessedClusteringColumn)
{
- return cassandraVersion.compareTo(VersionNumber.parse("2.2.0")) < 0 && currentlyProcessedClusteringColumn != (clusteringColumns.size() - 1);
+ return cassandraVersion.compareTo(Version.parse("2.2.0")) < 0 && currentlyProcessedClusteringColumn != (clusteringColumns.size() - 1);
}
private static String toCqlLiteral(CassandraColumnHandle columnHandle, Object value)
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraMetadata.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraMetadata.java
index f9692c04d1ed..cf1f9a45a54c 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraMetadata.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraMetadata.java
@@ -50,7 +50,7 @@
import java.util.OptionalLong;
import java.util.stream.Collectors;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.truncate;
+import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.truncate;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.MoreCollectors.toOptional;
@@ -59,6 +59,8 @@
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.cqlNameToSqlName;
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.quoteStringLiteral;
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.validColumnName;
+import static io.trino.plugin.cassandra.util.CassandraCqlUtils.validSchemaName;
+import static io.trino.plugin.cassandra.util.CassandraCqlUtils.validTableName;
import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
import static io.trino.spi.StandardErrorCode.PERMISSION_DENIED;
import static java.lang.String.format;
@@ -329,7 +331,7 @@ public Optional finishCreateTable(ConnectorSession sess
public void truncateTable(ConnectorSession session, ConnectorTableHandle tableHandle)
{
CassandraTableHandle table = (CassandraTableHandle) tableHandle;
- cassandraSession.execute(truncate(table.getSchemaName(), table.getTableName()));
+ cassandraSession.execute(truncate(validSchemaName(table.getSchemaName()), validTableName(table.getTableName())).build());
}
@Override
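As context for the `truncate` change: the 4.x query builder is immutable and `build()` yields a `SimpleStatement`, whereas the 3.x builder returned a directly executable statement. A tiny sketch with made-up keyspace and table names:

```java
import com.datastax.oss.driver.api.core.cql.SimpleStatement;

import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.truncate;

public class TruncateSketch
{
    public static void main(String[] args)
    {
        // build() turns the immutable Truncate builder into an executable SimpleStatement.
        SimpleStatement statement = truncate("example_keyspace", "example_table").build();
        // Prints the generated CQL, e.g. TRUNCATE example_keyspace.example_table
        System.out.println(statement.getQuery());
    }
}
```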
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraPageSink.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraPageSink.java
index 1804d593a631..948b63758c2d 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraPageSink.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraPageSink.java
@@ -13,12 +13,15 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.BatchStatement;
-import com.datastax.driver.core.LocalDate;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ProtocolVersion;
-import com.datastax.driver.core.querybuilder.Insert;
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.cql.BatchStatement;
+import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder;
+import com.datastax.oss.driver.api.core.cql.DefaultBatchType;
+import com.datastax.oss.driver.api.core.cql.PreparedStatement;
+import com.datastax.oss.driver.api.core.cql.SimpleStatement;
+import com.datastax.oss.driver.api.querybuilder.term.Term;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
import com.google.common.primitives.Shorts;
import com.google.common.primitives.SignedBytes;
import io.airlift.slice.Slice;
@@ -29,20 +32,19 @@
import io.trino.spi.type.Type;
import io.trino.spi.type.UuidType;
import io.trino.spi.type.VarcharType;
-import org.joda.time.format.DateTimeFormatter;
-import org.joda.time.format.ISODateTimeFormat;
-import java.sql.Timestamp;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
import java.util.function.Function;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
+import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker;
+import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto;
import static com.google.common.base.Preconditions.checkArgument;
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.ID_COLUMN_NAME;
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.validColumnName;
@@ -69,15 +71,13 @@
public class CassandraPageSink
implements ConnectorPageSink
{
- private static final DateTimeFormatter DATE_FORMATTER = ISODateTimeFormat.date().withZoneUTC();
-
private final CassandraSession cassandraSession;
private final PreparedStatement insert;
private final List columnTypes;
private final boolean generateUuid;
private final int batchSize;
private final Function toCassandraDate;
- private final BatchStatement batchStatement = new BatchStatement();
+ private final BatchStatementBuilder batchStatement = BatchStatement.builder(DefaultBatchType.LOGGED);
public CassandraPageSink(
CassandraSession cassandraSession,
@@ -97,23 +97,26 @@ public CassandraPageSink(
this.generateUuid = generateUuid;
this.batchSize = batchSize;
- if (protocolVersion.toInt() <= ProtocolVersion.V3.toInt()) {
- this.toCassandraDate = value -> DATE_FORMATTER.print(TimeUnit.DAYS.toMillis(value));
+ if (protocolVersion.getCode() <= ProtocolVersion.V3.getCode()) {
+ toCassandraDate = value -> DateTimeFormatter.ISO_LOCAL_DATE.format(LocalDate.ofEpochDay(toIntExact(value)));
}
else {
- this.toCassandraDate = value -> LocalDate.fromDaysSinceEpoch(toIntExact(value));
+ toCassandraDate = value -> LocalDate.ofEpochDay(toIntExact(value));
}
- Insert insert = insertInto(validSchemaName(schemaName), validTableName(tableName));
+ ImmutableMap.Builder parameters = ImmutableMap.builder();
if (generateUuid) {
- insert.value(ID_COLUMN_NAME, bindMarker());
+ parameters.put(ID_COLUMN_NAME, bindMarker());
}
for (int i = 0; i < columnNames.size(); i++) {
String columnName = columnNames.get(i);
checkArgument(columnName != null, "columnName is null at position: %s", i);
- insert.value(validColumnName(columnName), bindMarker());
+ parameters.put(validColumnName(columnName), bindMarker());
}
- this.insert = cassandraSession.prepare(insert);
+ SimpleStatement insertStatement = insertInto(validSchemaName(schemaName), validTableName(tableName))
+ .values(parameters.buildOrThrow())
+ .build();
+ this.insert = cassandraSession.prepare(insertStatement);
}
@Override
@@ -129,11 +132,11 @@ public CompletableFuture> appendPage(Page page)
appendColumn(values, page, position, channel);
}
- batchStatement.add(insert.bind(values.toArray()));
+ batchStatement.addStatement(insert.bind(values.toArray()));
- if (batchStatement.size() >= batchSize) {
- cassandraSession.execute(batchStatement);
- batchStatement.clear();
+ if (batchStatement.getStatementsCount() >= batchSize) {
+ cassandraSession.execute(batchStatement.build());
+ batchStatement.clearStatements();
}
}
return NOT_BLOCKED;
@@ -171,7 +174,7 @@ else if (DATE.equals(type)) {
values.add(toCassandraDate.apply(type.getLong(block, position)));
}
else if (TIMESTAMP_TZ_MILLIS.equals(type)) {
- values.add(new Timestamp(unpackMillisUtc(type.getLong(block, position))));
+ values.add(Instant.ofEpochMilli(unpackMillisUtc(type.getLong(block, position))));
}
else if (type instanceof VarcharType) {
values.add(type.getSlice(block, position).toStringUtf8());
@@ -190,9 +193,9 @@ else if (UuidType.UUID.equals(type)) {
@Override
public CompletableFuture> finish()
{
- if (batchStatement.size() > 0) {
- cassandraSession.execute(batchStatement);
- batchStatement.clear();
+ if (batchStatement.getStatementsCount() > 0) {
+ cassandraSession.execute(batchStatement.build());
+ batchStatement.clearStatements();
}
// the committer does not need any additional info
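The date handling above branches on protocol version; a self-contained illustration of the two conversions, using an arbitrary epoch-day value:

```java
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;

public class DateMappingSketch
{
    public static void main(String[] args)
    {
        long epochDay = 19_000; // Trino DATE values are days since 1970-01-01

        // Protocol V3 and older: the value is sent as an ISO-8601 date string.
        String v3Value = DateTimeFormatter.ISO_LOCAL_DATE.format(LocalDate.ofEpochDay(epochDay));

        // Protocol V4 and newer: the driver accepts java.time.LocalDate directly
        // (driver 3.x needed its own com.datastax.driver.core.LocalDate wrapper).
        LocalDate v4Value = LocalDate.ofEpochDay(epochDay);

        System.out.println(v3Value + " / " + v4Value); // 2022-01-08 / 2022-01-08
    }
}
```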
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraRecordCursor.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraRecordCursor.java
index d4d30102d153..986a48eb82a1 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraRecordCursor.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraRecordCursor.java
@@ -13,8 +13,8 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Row;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import com.datastax.oss.driver.api.core.cql.Row;
import io.airlift.slice.Slice;
import io.trino.plugin.cassandra.CassandraType.Kind;
import io.trino.spi.connector.RecordCursor;
@@ -45,8 +45,9 @@ public CassandraRecordCursor(CassandraSession cassandraSession, List (CassandraColumnHandle) column)
.collect(toList());
- String selectCql = CassandraCqlUtils.selectFrom(cassandraTable, cassandraColumns).getQueryString();
+ String selectCql = CassandraCqlUtils.selectFrom(cassandraTable, cassandraColumns).asCql();
StringBuilder sb = new StringBuilder(selectCql);
if (sb.charAt(sb.length() - 1) == ';') {
sb.setLength(sb.length() - 1);
diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraSession.java b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraSession.java
index d48d0b2de8e2..67677b6323f1 100644
--- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraSession.java
+++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraSession.java
@@ -13,30 +13,35 @@
*/
package io.trino.plugin.cassandra;
-import com.datastax.driver.core.AbstractTableMetadata;
-import com.datastax.driver.core.Cluster;
-import com.datastax.driver.core.ColumnMetadata;
-import com.datastax.driver.core.DataType;
-import com.datastax.driver.core.Host;
-import com.datastax.driver.core.IndexMetadata;
-import com.datastax.driver.core.KeyspaceMetadata;
-import com.datastax.driver.core.MaterializedViewMetadata;
-import com.datastax.driver.core.PreparedStatement;
-import com.datastax.driver.core.ProtocolVersion;
-import com.datastax.driver.core.RegularStatement;
-import com.datastax.driver.core.ResultSet;
-import com.datastax.driver.core.Row;
-import com.datastax.driver.core.Session;
-import com.datastax.driver.core.Statement;
-import com.datastax.driver.core.TableMetadata;
-import com.datastax.driver.core.TokenRange;
-import com.datastax.driver.core.VersionNumber;
-import com.datastax.driver.core.exceptions.NoHostAvailableException;
-import com.datastax.driver.core.policies.ReconnectionPolicy;
-import com.datastax.driver.core.policies.ReconnectionPolicy.ReconnectionSchedule;
-import com.datastax.driver.core.querybuilder.Clause;
-import com.datastax.driver.core.querybuilder.QueryBuilder;
-import com.datastax.driver.core.querybuilder.Select;
+import com.datastax.oss.driver.api.core.AllNodesFailedException;
+import com.datastax.oss.driver.api.core.CqlIdentifier;
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.Version;
+import com.datastax.oss.driver.api.core.connection.ReconnectionPolicy;
+import com.datastax.oss.driver.api.core.cql.PreparedStatement;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import com.datastax.oss.driver.api.core.cql.Row;
+import com.datastax.oss.driver.api.core.cql.SimpleStatement;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.metadata.Node;
+import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata;
+import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata;
+import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
+import com.datastax.oss.driver.api.core.metadata.schema.RelationMetadata;
+import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata;
+import com.datastax.oss.driver.api.core.metadata.schema.ViewMetadata;
+import com.datastax.oss.driver.api.core.metadata.token.TokenRange;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.ListType;
+import com.datastax.oss.driver.api.core.type.MapType;
+import com.datastax.oss.driver.api.core.type.SetType;
+import com.datastax.oss.driver.api.core.type.TupleType;
+import com.datastax.oss.driver.api.core.type.UserDefinedType;
+import com.datastax.oss.driver.api.querybuilder.QueryBuilder;
+import com.datastax.oss.driver.api.querybuilder.relation.Relation;
+import com.datastax.oss.driver.api.querybuilder.select.Select;
+import com.datastax.oss.driver.api.querybuilder.term.Term;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Ordering;
@@ -53,8 +58,11 @@
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.TupleDomain;
+import java.io.Closeable;
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -62,10 +70,11 @@
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;
+import java.util.stream.IntStream;
import java.util.stream.Stream;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
-import static com.datastax.driver.core.querybuilder.QueryBuilder.select;
+import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal;
+import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Suppliers.memoize;
import static com.google.common.collect.ImmutableList.toImmutableList;
@@ -77,6 +86,7 @@
import static io.trino.plugin.cassandra.CassandraType.toCassandraType;
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.selectDistinctFrom;
import static io.trino.plugin.cassandra.util.CassandraCqlUtils.validSchemaName;
+import static io.trino.plugin.cassandra.util.CassandraCqlUtils.validTableName;
import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;
import static java.lang.String.format;
import static java.util.Comparator.comparing;
@@ -86,27 +96,26 @@
import static java.util.stream.Collectors.toList;
public class CassandraSession
+ implements Closeable
{
private static final Logger log = Logger.get(CassandraSession.class);
private static final String SYSTEM = "system";
private static final String SIZE_ESTIMATES = "size_estimates";
- private static final VersionNumber PARTITION_FETCH_WITH_IN_PREDICATE_VERSION = VersionNumber.parse("2.2");
+ private static final Version PARTITION_FETCH_WITH_IN_PREDICATE_VERSION = Version.parse("2.2");
private final JsonCodec> extraColumnMetadataCodec;
- private final Cluster cluster;
- private final Supplier session;
+ private final Supplier session;
private final Duration noHostAvailableRetryTimeout;
- public CassandraSession(JsonCodec> extraColumnMetadataCodec, Cluster cluster, Duration noHostAvailableRetryTimeout)
+ public CassandraSession(JsonCodec> extraColumnMetadataCodec, Supplier sessionSupplier, Duration noHostAvailableRetryTimeout)
{
this.extraColumnMetadataCodec = requireNonNull(extraColumnMetadataCodec, "extraColumnMetadataCodec is null");
- this.cluster = requireNonNull(cluster, "cluster is null");
this.noHostAvailableRetryTimeout = requireNonNull(noHostAvailableRetryTimeout, "noHostAvailableRetryTimeout is null");
- this.session = memoize(cluster::connect);
+ this.session = memoize(sessionSupplier::get);
}
- public VersionNumber getCassandraVersion()
+ public Version getCassandraVersion()
{
ResultSet result = executeWithSession(session -> session.execute("select release_version from system.local"));
Row versionRow = result.one();
@@ -115,51 +124,60 @@ public VersionNumber getCassandraVersion()
"Please make sure that the Cassandra cluster is up and running, " +
"and that the contact points are specified correctly.");
}
- return VersionNumber.parse(versionRow.getString("release_version"));
+ return Version.parse(versionRow.getString("release_version"));
}
public ProtocolVersion getProtocolVersion()
{
- return executeWithSession(session -> session.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion());
+ return executeWithSession(session -> session.getContext().getProtocolVersion());
}
public String getPartitioner()
{
- return executeWithSession(session -> session.getCluster().getMetadata().getPartitioner());
+ return executeWithSession(session -> session.getMetadata().getTokenMap()
+ .orElseThrow()
+ .getPartitionerName());
}
public Set getTokenRanges()
{
- return executeWithSession(session -> session.getCluster().getMetadata().getTokenRanges());
+ return executeWithSession(session -> session.getMetadata().getTokenMap()
+ .orElseThrow()
+ .getTokenRanges());
}
- public Set getReplicas(String caseSensitiveSchemaName, TokenRange tokenRange)
+ public Set getReplicas(String caseSensitiveSchemaName, TokenRange tokenRange)
{
requireNonNull(caseSensitiveSchemaName, "caseSensitiveSchemaName is null");
requireNonNull(tokenRange, "tokenRange is null");
return executeWithSession(session ->
- session.getCluster().getMetadata().getReplicas(validSchemaName(caseSensitiveSchemaName), tokenRange));
+ session.getMetadata()
+ .getTokenMap()
+ .map(tokenMap -> tokenMap.getReplicas(validSchemaName(caseSensitiveSchemaName), tokenRange))
+ .orElse(ImmutableSet.of()));
}
- public Set getReplicas(String caseSensitiveSchemaName, ByteBuffer partitionKey)
+ public Set getReplicas(String caseSensitiveSchemaName, ByteBuffer partitionKey)
{
requireNonNull(caseSensitiveSchemaName, "caseSensitiveSchemaName is null");
requireNonNull(partitionKey, "partitionKey is null");
return executeWithSession(session ->
- session.getCluster().getMetadata().getReplicas(validSchemaName(caseSensitiveSchemaName), partitionKey));
+ session.getMetadata().getTokenMap()
+ .map(tokenMap -> tokenMap.getReplicas(validSchemaName(caseSensitiveSchemaName), partitionKey))
+ .orElse(ImmutableSet.of()));
}
public String getCaseSensitiveSchemaName(String caseInsensitiveSchemaName)
{
- return getKeyspaceByCaseInsensitiveName(caseInsensitiveSchemaName).getName();
+ return getKeyspaceByCaseInsensitiveName(caseInsensitiveSchemaName).getName().asInternal();
}
public List getCaseSensitiveSchemaNames()
{
ImmutableList.Builder builder = ImmutableList.builder();
- List keyspaces = executeWithSession(session -> session.getCluster().getMetadata().getKeyspaces());
- for (KeyspaceMetadata meta : keyspaces) {
- builder.add(meta.getName());
+ Map keyspaces = executeWithSession(session -> session.getMetadata().getKeyspaces());
+ for (KeyspaceMetadata meta : keyspaces.values()) {
+ builder.add(meta.getName().asInternal());
}
return builder.build();
}
@@ -169,11 +187,11 @@ public List getCaseSensitiveTableNames(String caseInsensitiveSchemaName)
{
KeyspaceMetadata keyspace = getKeyspaceByCaseInsensitiveName(caseInsensitiveSchemaName);
ImmutableList.Builder builder = ImmutableList.builder();
- for (TableMetadata table : keyspace.getTables()) {
- builder.add(table.getName());
+ for (TableMetadata table : keyspace.getTables().values()) {
+ builder.add(table.getName().asInternal());
}
- for (MaterializedViewMetadata materializedView : keyspace.getMaterializedViews()) {
- builder.add(materializedView.getName());
+ for (ViewMetadata materializedView : keyspace.getViews().values()) {
+ builder.add(materializedView.getName().asInternal());
}
return builder.build();
}
@@ -182,20 +200,20 @@ public CassandraTable getTable(SchemaTableName schemaTableName)
throws TableNotFoundException
{
KeyspaceMetadata keyspace = getKeyspaceByCaseInsensitiveName(schemaTableName.getSchemaName());
- AbstractTableMetadata tableMeta = getTableMetadata(keyspace, schemaTableName.getTableName());
+ RelationMetadata tableMeta = getTableMetadata(keyspace, schemaTableName.getTableName());
List columnNames = new ArrayList<>();
- List columns = tableMeta.getColumns();
+ Collection columns = tableMeta.getColumns().values();
checkColumnNames(columns);
for (ColumnMetadata columnMetadata : columns) {
- columnNames.add(columnMetadata.getName());
+ columnNames.add(columnMetadata.getName().asInternal());
}
// check if there is a comment to establish column ordering
- String comment = tableMeta.getOptions().getComment();
+ Object comment = tableMeta.getOptions().get(CqlIdentifier.fromInternal("comment"));
Set hiddenColumns = ImmutableSet.of();
- if (comment != null && comment.startsWith(PRESTO_COMMENT_METADATA)) {
- String columnOrderingString = comment.substring(PRESTO_COMMENT_METADATA.length());
+ if (comment instanceof String && ((String) comment).startsWith(PRESTO_COMMENT_METADATA)) {
+ String columnOrderingString = ((String) comment).substring(PRESTO_COMMENT_METADATA.length());
// column ordering
List extras = extraColumnMetadataCodec.fromJson(columnOrderingString);
@@ -218,28 +236,28 @@ public CassandraTable getTable(SchemaTableName schemaTableName)
ImmutableList.Builder columnHandles = ImmutableList.builder();
// add primary keys first
- Set primaryKeySet = new HashSet<>();
+ Set primaryKeySet = new HashSet<>();
for (ColumnMetadata columnMeta : tableMeta.getPartitionKey()) {
primaryKeySet.add(columnMeta.getName());
- boolean hidden = hiddenColumns.contains(columnMeta.getName());
- CassandraColumnHandle columnHandle = buildColumnHandle(tableMeta, columnMeta, true, false, columnNames.indexOf(columnMeta.getName()), hidden)
- .orElseThrow(() -> new TrinoException(NOT_SUPPORTED, "Unsupported partition key type: " + columnMeta.getType().getName()));
+ boolean hidden = hiddenColumns.contains(columnMeta.getName().asInternal());
+ CassandraColumnHandle columnHandle = buildColumnHandle(tableMeta, columnMeta, true, false, columnNames.indexOf(columnMeta.getName().asInternal()), hidden)
+ .orElseThrow(() -> new TrinoException(NOT_SUPPORTED, "Unsupported partition key type: " + columnMeta.getType().asCql(false, false)));
columnHandles.add(columnHandle);
}
// add clustering columns
- for (ColumnMetadata columnMeta : tableMeta.getClusteringColumns()) {
+ for (ColumnMetadata columnMeta : tableMeta.getClusteringColumns().keySet()) {
primaryKeySet.add(columnMeta.getName());
- boolean hidden = hiddenColumns.contains(columnMeta.getName());
- Optional columnHandle = buildColumnHandle(tableMeta, columnMeta, false, true, columnNames.indexOf(columnMeta.getName()), hidden);
+ boolean hidden = hiddenColumns.contains(columnMeta.getName().asInternal());
+ Optional columnHandle = buildColumnHandle(tableMeta, columnMeta, false, true, columnNames.indexOf(columnMeta.getName().asInternal()), hidden);
columnHandle.ifPresent(columnHandles::add);
}
// add other columns
for (ColumnMetadata columnMeta : columns) {
if (!primaryKeySet.contains(columnMeta.getName())) {
- boolean hidden = hiddenColumns.contains(columnMeta.getName());
- Optional columnHandle = buildColumnHandle(tableMeta, columnMeta, false, false, columnNames.indexOf(columnMeta.getName()), hidden);
+ boolean hidden = hiddenColumns.contains(columnMeta.getName().asInternal());
+ Optional columnHandle = buildColumnHandle(tableMeta, columnMeta, false, false, columnNames.indexOf(columnMeta.getName().asInternal()), hidden);
columnHandle.ifPresent(columnHandles::add);
}
}
@@ -248,19 +266,21 @@ public CassandraTable getTable(SchemaTableName schemaTableName)
.sorted(comparing(CassandraColumnHandle::getOrdinalPosition))
.collect(toList());
- CassandraTableHandle tableHandle = new CassandraTableHandle(tableMeta.getKeyspace().getName(), tableMeta.getName());
+ CassandraTableHandle tableHandle = new CassandraTableHandle(tableMeta.getKeyspace().asInternal(), tableMeta.getName().asInternal());
return new CassandraTable(tableHandle, sortedColumnHandles);
}
private KeyspaceMetadata getKeyspaceByCaseInsensitiveName(String caseInsensitiveSchemaName)
throws SchemaNotFoundException
{
- List keyspaces = executeWithSession(session -> session.getCluster().getMetadata().getKeyspaces());
+ Collection keyspaces = executeWithSession(session -> session.getMetadata().getKeyspaces()).values();
KeyspaceMetadata result = null;
// Ensure that the error message is deterministic
- List sortedKeyspaces = Ordering.from(comparing(KeyspaceMetadata::getName)).immutableSortedCopy(keyspaces);
+ List sortedKeyspaces = keyspaces.stream()
+ .sorted(Comparator.comparing(keyspaceMetadata -> keyspaceMetadata.getName().asInternal()))
+ .collect(toImmutableList());
for (KeyspaceMetadata keyspace : sortedKeyspaces) {
- if (keyspace.getName().equalsIgnoreCase(caseInsensitiveSchemaName)) {
+ if (keyspace.getName().asInternal().equalsIgnoreCase(caseInsensitiveSchemaName)) {
if (result != null) {
throw new TrinoException(
NOT_SUPPORTED,
@@ -276,21 +296,21 @@ private KeyspaceMetadata getKeyspaceByCaseInsensitiveName(String caseInsensitive
return result;
}
- private static AbstractTableMetadata getTableMetadata(KeyspaceMetadata keyspace, String caseInsensitiveTableName)
+ private static RelationMetadata getTableMetadata(KeyspaceMetadata keyspace, String caseInsensitiveTableName)
{
- List tables = Stream.concat(
- keyspace.getTables().stream(),
- keyspace.getMaterializedViews().stream())
- .filter(table -> table.getName().equalsIgnoreCase(caseInsensitiveTableName))
+ List tables = Stream.concat(
+ keyspace.getTables().values().stream(),
+ keyspace.getViews().values().stream())
+ .filter(table -> table.getName().asInternal().equalsIgnoreCase(caseInsensitiveTableName))
.collect(toImmutableList());
if (tables.size() == 0) {
- throw new TableNotFoundException(new SchemaTableName(keyspace.getName(), caseInsensitiveTableName));
+ throw new TableNotFoundException(new SchemaTableName(keyspace.getName().asInternal(), caseInsensitiveTableName));
}
if (tables.size() == 1) {
return tables.get(0);
}
String tableNames = tables.stream()
- .map(AbstractTableMetadata::getName)
+ .map(metadata -> metadata.getName().asInternal())
.sorted()
.collect(joining(", "));
throw new TrinoException(
@@ -302,14 +322,14 @@ private static AbstractTableMetadata getTableMetadata(KeyspaceMetadata keyspace,
public boolean isMaterializedView(SchemaTableName schemaTableName)
{
KeyspaceMetadata keyspace = getKeyspaceByCaseInsensitiveName(schemaTableName.getSchemaName());
- return keyspace.getMaterializedView(schemaTableName.getTableName()) != null;
+ return keyspace.getView(validTableName(schemaTableName.getTableName())).isPresent();
}
- private static void checkColumnNames(List columns)
+ private static void checkColumnNames(Collection columns)
{
Map lowercaseNameToColumnMap = new HashMap<>();
for (ColumnMetadata column : columns) {
- String lowercaseName = column.getName().toLowerCase(ENGLISH);
+ String lowercaseName = column.getName().asInternal().toLowerCase(ENGLISH);
if (lowercaseNameToColumnMap.containsKey(lowercaseName)) {
throw new TrinoException(
NOT_SUPPORTED,
@@ -320,15 +340,15 @@ private static void checkColumnNames(List columns)
}
}
- private Optional buildColumnHandle(AbstractTableMetadata tableMetadata, ColumnMetadata columnMeta, boolean partitionKey, boolean clusteringKey, int ordinalPosition, boolean hidden)
+ private Optional buildColumnHandle(RelationMetadata tableMetadata, ColumnMetadata columnMeta, boolean partitionKey, boolean clusteringKey, int ordinalPosition, boolean hidden)
{
Optional cassandraType = toCassandraType(columnMeta.getType());
if (cassandraType.isEmpty()) {
- log.debug("Unsupported column type: %s", columnMeta.getType().getName());
+ log.debug("Unsupported column type: %s", columnMeta.getType().asCql(false, false));
return Optional.empty();
}
- List typeArgs = columnMeta.getType().getTypeArguments();
+ List typeArgs = getTypeArguments(columnMeta.getType());
for (DataType typeArgument : typeArgs) {
if (!isFullySupported(typeArgument)) {
log.debug("%s column has unsupported type: %s", columnMeta.getName(), typeArgument);
@@ -336,17 +356,17 @@ private Optional buildColumnHandle(AbstractTableMetadata
}
}
boolean indexed = false;
- SchemaTableName schemaTableName = new SchemaTableName(tableMetadata.getKeyspace().getName(), tableMetadata.getName());
+ SchemaTableName schemaTableName = new SchemaTableName(tableMetadata.getKeyspace().asInternal(), tableMetadata.getName().asInternal());
if (!isMaterializedView(schemaTableName)) {
TableMetadata table = (TableMetadata) tableMetadata;
- for (IndexMetadata idx : table.getIndexes()) {
- if (idx.getTarget().equals(columnMeta.getName())) {
+ for (IndexMetadata idx : table.getIndexes().values()) {
+ if (idx.getTarget().equals(columnMeta.getName().asInternal())) {
indexed = true;
break;
}
}
}
- return Optional.of(new CassandraColumnHandle(columnMeta.getName(), ordinalPosition, cassandraType.get(), partitionKey, clusteringKey, indexed, hidden));
+ return Optional.of(new CassandraColumnHandle(columnMeta.getName().asInternal(), ordinalPosition, cassandraType.get(), partitionKey, clusteringKey, indexed, hidden));
}
/**
@@ -393,7 +413,7 @@ public List getPartitions(CassandraTable table, List session.execute(cql));
}
- public PreparedStatement prepare(RegularStatement statement)
+ public PreparedStatement prepare(SimpleStatement statement)
{
- log.debug("Execute RegularStatement: %s", statement);
+ log.debug("Execute SimpleStatement: %s", statement);
return executeWithSession(session -> session.prepare(statement));
}
@@ -448,11 +468,11 @@ private Iterable queryPartitionKeysWithInClauses(CassandraTable table, List
CassandraTableHandle tableHandle = table.getTableHandle();
List partitionKeyColumns = table.getPartitionKeyColumns();
- Select partitionKeys = selectDistinctFrom(tableHandle, partitionKeyColumns);
- addWhereInClauses(partitionKeys.where(), partitionKeyColumns, filterPrefixes);
+ Select partitionKeys = selectDistinctFrom(tableHandle, partitionKeyColumns)
+ .where(getInRelations(partitionKeyColumns, filterPrefixes));
log.debug("Execute cql for partition keys with IN clauses: %s", partitionKeys);
- return execute(partitionKeys).all();
+ return execute(partitionKeys.build()).all();
}
private Iterable queryPartitionKeysLegacyWithMultipleQueries(CassandraTable table, List> filterPrefixes)
@@ -464,11 +484,11 @@ private Iterable queryPartitionKeysLegacyWithMultipleQueries(CassandraTable
ImmutableList.Builder rowList = ImmutableList.builder();
for (List