diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e1abf3cc93a2..a7159b3cd7b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -695,6 +695,7 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.TRINO_AWS_SECRET_ACCESS_KEY }} AWS_REGION: ${{ vars.TRINO_AWS_REGION }} S3_BUCKET: ${{ vars.TRINO_S3_BUCKET }} + S3_TABLES_BUCKET: ${{ vars.TRINO_S3_TABLES_BUCKET }} GCP_CREDENTIALS_KEY: ${{ secrets.GCP_CREDENTIALS_KEY }} GCP_STORAGE_BUCKET: ${{ vars.GCP_STORAGE_BUCKET }} ABFS_CONTAINER: ${{ vars.AZURE_ABFS_HIERARCHICAL_CONTAINER }} @@ -882,7 +883,6 @@ jobs: - suite-7-non-generic - suite-hive-transactional - suite-azure - - suite-delta-lake-databricks104 - suite-delta-lake-databricks113 - suite-delta-lake-databricks122 - suite-delta-lake-databricks133 @@ -922,9 +922,6 @@ jobs: ignore exclusion if: >- ${{ env.CI_SKIP_SECRETS_PRESENCE_CHECKS != '' || secrets.GCP_CREDENTIALS_KEY != '' }} - - suite: suite-delta-lake-databricks104 - ignore exclusion if: >- - ${{ env.CI_SKIP_SECRETS_PRESENCE_CHECKS != '' || secrets.DATABRICKS_TOKEN != '' }} - suite: suite-delta-lake-databricks113 ignore exclusion if: >- ${{ env.CI_SKIP_SECRETS_PRESENCE_CHECKS != '' || secrets.DATABRICKS_TOKEN != '' }} @@ -989,7 +986,6 @@ jobs: AWS_REGION: "" TRINO_AWS_ACCESS_KEY_ID: "" TRINO_AWS_SECRET_ACCESS_KEY: "" - DATABRICKS_104_JDBC_URL: "" DATABRICKS_113_JDBC_URL: "" DATABRICKS_122_JDBC_URL: "" DATABRICKS_133_JDBC_URL: "" @@ -1066,7 +1062,6 @@ jobs: AWS_REGION: ${{ vars.TRINO_AWS_REGION }} TRINO_AWS_ACCESS_KEY_ID: ${{ vars.TRINO_AWS_ACCESS_KEY_ID }} TRINO_AWS_SECRET_ACCESS_KEY: ${{ secrets.TRINO_AWS_SECRET_ACCESS_KEY }} - DATABRICKS_104_JDBC_URL: ${{ vars.DATABRICKS_104_JDBC_URL }} DATABRICKS_113_JDBC_URL: ${{ vars.DATABRICKS_113_JDBC_URL }} DATABRICKS_122_JDBC_URL: ${{ vars.DATABRICKS_122_JDBC_URL }} DATABRICKS_133_JDBC_URL: ${{ vars.DATABRICKS_133_JDBC_URL }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ac0051df7203..3a350cf9a382 100644 --- 
a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest if: github.repository == 'trinodb/trino' steps: - - uses: actions/stale@v9.0.0 + - uses: actions/stale@v9.1.0 with: stale-pr-message: 'This pull request has gone a while without any activity. Tagging for triage help: @mosabua' days-before-pr-stale: 21 @@ -21,7 +21,7 @@ jobs: stale-pr-label: 'stale' exempt-pr-labels: 'stale-ignore' start-date: '2020-01-01T00:00:00Z' - exempt-draft-pr: true + exempt-draft-pr: false operations-per-run: 200 # Avoid processing issues completely, see https://github.com/actions/stale/issues/1112 days-before-issue-stale: -1 diff --git a/.mvn/modernizer/violations.xml b/.mvn/modernizer/violations.xml index 12045c09097b..c5ddee06db55 100644 --- a/.mvn/modernizer/violations.xml +++ b/.mvn/modernizer/violations.xml @@ -317,4 +317,10 @@ 1.8 Use io.trino.plugin.base.util.JsonUtils.jsonFactoryBuilder() instead + + + software/amazon/awssdk/services/glue/model/Table.tableType:()Ljava/lang/String; + 1.8 + Table type is nullable in Glue model, which is too easy to forget about. 
Prefer GlueConverter.getTableTypeNullable + diff --git a/client/trino-cli/pom.xml b/client/trino-cli/pom.xml index 9b1573ab9aa6..c2d5cc3152bf 100644 --- a/client/trino-cli/pom.xml +++ b/client/trino-cli/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java b/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java index 574256cfc3d6..7f5a8d80971f 100644 --- a/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java +++ b/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java @@ -18,9 +18,9 @@ import io.trino.client.ClientSession; import io.trino.client.ClientTypeSignature; import io.trino.client.Column; -import io.trino.client.JsonCodec; import io.trino.client.QueryResults; import io.trino.client.StatementStats; +import io.trino.client.TrinoJsonCodec; import io.trino.client.TypedQueryData; import io.trino.client.uri.PropertyName; import io.trino.client.uri.TrinoUri; @@ -46,7 +46,7 @@ import static io.trino.cli.ClientOptions.OutputFormat.CSV; import static io.trino.cli.TerminalUtils.getTerminal; import static io.trino.client.ClientStandardTypes.BIGINT; -import static io.trino.client.JsonCodec.jsonCodec; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static io.trino.client.auth.external.ExternalRedirectStrategy.PRINT; import static java.util.concurrent.TimeUnit.MINUTES; import static org.assertj.core.api.Assertions.assertThat; @@ -55,7 +55,7 @@ @TestInstance(PER_METHOD) public class TestQueryRunner { - private static final JsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); + private static final TrinoJsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); private MockWebServer server; @BeforeEach diff --git a/client/trino-client/pom.xml b/client/trino-client/pom.xml index 2c353ea20624..92977ac6f54f 100644 --- a/client/trino-client/pom.xml +++ b/client/trino-client/pom.xml @@ -5,7 +5,7 @@ io.trino 
trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/client/trino-client/src/main/java/io/trino/client/ClientTypeSignatureParameter.java b/client/trino-client/src/main/java/io/trino/client/ClientTypeSignatureParameter.java index a4c900d338c0..bed65ae12b13 100644 --- a/client/trino-client/src/main/java/io/trino/client/ClientTypeSignatureParameter.java +++ b/client/trino-client/src/main/java/io/trino/client/ClientTypeSignatureParameter.java @@ -145,7 +145,7 @@ public int hashCode() public static class ClientTypeSignatureParameterDeserializer extends JsonDeserializer { - private static final ObjectMapper MAPPER = JsonCodec.OBJECT_MAPPER_SUPPLIER.get(); + private static final ObjectMapper MAPPER = TrinoJsonCodec.OBJECT_MAPPER_SUPPLIER.get(); @Override public ClientTypeSignatureParameter deserialize(JsonParser jp, DeserializationContext ctxt) diff --git a/client/trino-client/src/main/java/io/trino/client/CloseableIterator.java b/client/trino-client/src/main/java/io/trino/client/CloseableIterator.java new file mode 100644 index 000000000000..501f3c3508d2 --- /dev/null +++ b/client/trino-client/src/main/java/io/trino/client/CloseableIterator.java @@ -0,0 +1,56 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.client; + +import java.io.Closeable; +import java.util.Iterator; + +/* + * A CloseableIterator is an Iterator that must be closed to release resources. 
+ */ +public interface CloseableIterator + extends Iterator, Closeable +{ + static CloseableIterator closeable(Iterator iterator) + { + return new CloseableIterator() + { + @Override + public void close() + { + } + + @Override + public boolean hasNext() + { + return iterator.hasNext(); + } + + @Override + public T next() + { + return iterator.next(); + } + + @Override + public String toString() + { + return "CloseableIterator{iterator=" + iterator + '}'; + } + }; + } + + @Override + String toString(); +} diff --git a/client/trino-client/src/main/java/io/trino/client/JsonResultRows.java b/client/trino-client/src/main/java/io/trino/client/JsonIterators.java similarity index 84% rename from client/trino-client/src/main/java/io/trino/client/JsonResultRows.java rename to client/trino-client/src/main/java/io/trino/client/JsonIterators.java index f2f5e965732e..dc7b3e4f146f 100644 --- a/client/trino-client/src/main/java/io/trino/client/JsonResultRows.java +++ b/client/trino-client/src/main/java/io/trino/client/JsonIterators.java @@ -39,21 +39,22 @@ import static java.util.Collections.unmodifiableList; import static java.util.Objects.requireNonNull; -public final class JsonResultRows +public final class JsonIterators { private static final JsonFactory JSON_FACTORY = createJsonFactory(); - private JsonResultRows() {} + private JsonIterators() {} - private static class RowWiseIterator + private static class JsonIterator extends AbstractIterator> + implements CloseableIterator> { private final Closer closer = Closer.create(); private boolean closed; private final JsonParser parser; private final TypeDecoder[] decoders; - public RowWiseIterator(JsonParser parser, TypeDecoder[] decoders) + public JsonIterator(JsonParser parser, TypeDecoder[] decoders) throws IOException { requireNonNull(decoders, "decoders is null"); @@ -77,7 +78,7 @@ public RowWiseIterator(JsonParser parser, TypeDecoder[] decoders) } } - public RowWiseIterator(InputStream stream, TypeDecoder[] decoders) + 
public JsonIterator(InputStream stream, TypeDecoder[] decoders) throws IOException { this(JSON_FACTORY.createParser(requireNonNull(stream, "stream is null")), decoders); @@ -128,7 +129,8 @@ public List computeNext() } } - private void close() + @Override + public void close() throws IOException { this.closed = true; @@ -136,28 +138,16 @@ private void close() } } - public static ResultRows forJsonParser(JsonParser parser, List columns) + public static CloseableIterator> forJsonParser(JsonParser parser, List columns) + throws IOException { - return () -> { - try { - return new RowWiseIterator(parser, createTypeDecoders(columns)); - } - catch (IOException e) { - throw new UncheckedIOException(e); - } - }; + return new JsonIterator(parser, createTypeDecoders(columns)); } - public static ResultRows forInputStream(InputStream stream, TypeDecoder[] decoders) + public static CloseableIterator> forInputStream(InputStream stream, TypeDecoder[] decoders) + throws IOException { - return () -> { - try { - return new RowWiseIterator(stream, decoders); - } - catch (IOException e) { - throw new UncheckedIOException(e); - } - }; + return new JsonIterator(stream, decoders); } @SuppressModernizer // There is no JsonFactory in the client module diff --git a/client/trino-client/src/main/java/io/trino/client/JsonResponse.java b/client/trino-client/src/main/java/io/trino/client/JsonResponse.java index d9cf6a84ed5c..4898b95cc6d9 100644 --- a/client/trino-client/src/main/java/io/trino/client/JsonResponse.java +++ b/client/trino-client/src/main/java/io/trino/client/JsonResponse.java @@ -108,7 +108,7 @@ public String toString() .toString(); } - public static JsonResponse execute(JsonCodec codec, Call.Factory client, Request request, OptionalLong materializedJsonSizeLimit) + public static JsonResponse execute(TrinoJsonCodec codec, Call.Factory client, Request request, OptionalLong materializedJsonSizeLimit) { try (Response response = client.newCall(request).execute()) { ResponseBody 
responseBody = requireNonNull(response.body()); diff --git a/client/trino-client/src/main/java/io/trino/client/OkHttpSegmentLoader.java b/client/trino-client/src/main/java/io/trino/client/OkHttpSegmentLoader.java index eb699d7fc562..f3f9e61b33c4 100644 --- a/client/trino-client/src/main/java/io/trino/client/OkHttpSegmentLoader.java +++ b/client/trino-client/src/main/java/io/trino/client/OkHttpSegmentLoader.java @@ -22,7 +22,6 @@ import okhttp3.Request; import okhttp3.Response; -import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.util.List; @@ -65,7 +64,7 @@ public InputStream load(SpooledSegment segment) } if (response.isSuccessful()) { - return delegatingInputStream(response, response.body().byteStream(), segment); + return response.body().byteStream(); } throw new IOException(format("Could not open segment for streaming, got error '%s' with code %d", response.message(), response.code())); } @@ -95,21 +94,6 @@ public void onResponse(Call call, Response response) }); } - private InputStream delegatingInputStream(Response response, InputStream delegate, SpooledSegment segment) - { - return new FilterInputStream(delegate) - { - @Override - public void close() - throws IOException - { - try (Response ignored = response; InputStream ignored2 = delegate) { - acknowledge(segment); - } - } - }; - } - private static Headers toHeaders(Map> headers) { Headers.Builder builder = new Headers.Builder(); diff --git a/client/trino-client/src/main/java/io/trino/client/QueryDataDecoder.java b/client/trino-client/src/main/java/io/trino/client/QueryDataDecoder.java index 631cf7eb3a5f..fa0b5bc3a374 100644 --- a/client/trino-client/src/main/java/io/trino/client/QueryDataDecoder.java +++ b/client/trino-client/src/main/java/io/trino/client/QueryDataDecoder.java @@ -39,7 +39,7 @@ interface Factory * * @throws IOException if an I/O error occurs */ - ResultRows decode(InputStream input, DataAttributes segmentAttributes) + CloseableIterator> 
decode(InputStream input, DataAttributes segmentAttributes) throws IOException; String encoding(); diff --git a/client/trino-client/src/main/java/io/trino/client/ResultRows.java b/client/trino-client/src/main/java/io/trino/client/ResultRows.java index 9a73644f72b8..5db4f59c581b 100644 --- a/client/trino-client/src/main/java/io/trino/client/ResultRows.java +++ b/client/trino-client/src/main/java/io/trino/client/ResultRows.java @@ -13,18 +13,26 @@ */ package io.trino.client; +import java.io.Closeable; +import java.io.IOException; import java.util.Iterator; import java.util.List; +import static com.google.common.base.Verify.verify; import static java.util.Collections.emptyIterator; /** * Allows iterating over decoded result data in row-wise manner. + * + * Iterator can be acquired only once, and it should be closed after use. */ public interface ResultRows - extends Iterable> + extends Iterable>, Closeable { ResultRows NULL_ROWS = new ResultRows() { + @Override + public void close() {} + @Override public boolean isNull() { @@ -38,11 +46,40 @@ public Iterator> iterator() { return emptyIterator(); } + + @Override + public String toString() + { + return "EmptyResultRows{}"; + } }; - static ResultRows fromIterableRows(Iterable> values) + static ResultRows wrapIterator(CloseableIterator> iterator) { - return values::iterator; + return new ResultRows() { + private volatile boolean fetched; + + @Override + public void close() + throws IOException + { + iterator.close(); + } + + @Override + public Iterator> iterator() + { + verify(!fetched, "Iterator already fetched"); + fetched = true; + return iterator; + } + + @Override + public String toString() + { + return "ResultRows{iterator=" + iterator + "}"; + } + }; } default boolean isNull() diff --git a/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java b/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java index 64bf431fda9b..2a7c99399642 100644 --- 
a/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java +++ b/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java @@ -13,29 +13,22 @@ */ package io.trino.client; -import com.google.common.collect.Iterables; import io.trino.client.spooling.DataAttributes; import io.trino.client.spooling.EncodedQueryData; -import io.trino.client.spooling.InlineSegment; -import io.trino.client.spooling.Segment; import io.trino.client.spooling.SegmentLoader; -import io.trino.client.spooling.SpooledSegment; +import io.trino.client.spooling.SegmentsIterator; import io.trino.client.spooling.encoding.QueryDataDecoders; -import org.gaul.modernizer_maven_annotations.SuppressModernizer; -import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.util.List; import java.util.Optional; import static com.google.common.base.Preconditions.checkState; import static com.google.common.base.Verify.verify; -import static com.google.common.collect.Iterables.filter; -import static com.google.common.collect.Iterables.transform; +import static io.trino.client.CloseableIterator.closeable; import static io.trino.client.ResultRows.NULL_ROWS; -import static io.trino.client.ResultRows.fromIterableRows; +import static io.trino.client.ResultRows.wrapIterator; import static java.util.Objects.requireNonNull; /** @@ -92,7 +85,7 @@ public ResultRows toRows(List columns, QueryData data) return NULL_ROWS; // for backward compatibility instead of null } // RawQueryData is always typed - return () -> rawData.getIterable().iterator(); + return wrapIterator(closeable(rawData.getIterable().iterator())); } if (data instanceof JsonQueryData) { @@ -100,44 +93,21 @@ public ResultRows toRows(List columns, QueryData data) if (jsonData.isNull()) { return NULL_ROWS; } - return () -> JsonResultRows.forJsonParser(jsonData.getJsonParser(), columns).iterator(); - } - - if (data instanceof EncodedQueryData) { - 
EncodedQueryData encodedData = (EncodedQueryData) data; - setEncoding(columns, encodedData.getEncoding()); - return concat(transform(encodedData.getSegments(), this::segmentToRows)); - } - - throw new UnsupportedOperationException("Unsupported data type: " + data.getClass().getName()); - } - - private ResultRows segmentToRows(Segment segment) - { - if (segment instanceof InlineSegment) { - InlineSegment inlineSegment = (InlineSegment) segment; try { - return decoder.decode(new ByteArrayInputStream(inlineSegment.getData()), inlineSegment.getMetadata()); + return wrapIterator(JsonIterators.forJsonParser(jsonData.getJsonParser(), columns)); } catch (IOException e) { throw new UncheckedIOException(e); } } - if (segment instanceof SpooledSegment) { - SpooledSegment spooledSegment = (SpooledSegment) segment; - - try { - // The returned rows are lazy which means that decoder is responsible for closing input stream - InputStream stream = loader.load(spooledSegment); - return decoder.decode(stream, spooledSegment.getMetadata()); - } - catch (IOException e) { - throw new RuntimeException(e); - } + if (data instanceof EncodedQueryData) { + EncodedQueryData encodedData = (EncodedQueryData) data; + setEncoding(columns, encodedData.getEncoding()); + return wrapIterator(new SegmentsIterator(loader, decoder, encodedData.getSegments())); } - throw new UnsupportedOperationException("Unsupported segment type: " + segment.getClass().getName()); + throw new UnsupportedOperationException("Unsupported data type: " + data.getClass().getName()); } public Optional getEncoding() @@ -152,10 +122,4 @@ public void close() { loader.close(); } - - @SuppressModernizer - private static ResultRows concat(Iterable resultRows) - { - return fromIterableRows(Iterables.concat(filter(resultRows, rows -> !rows.isNull()))); - } } diff --git a/client/trino-client/src/main/java/io/trino/client/StatementClientV1.java b/client/trino-client/src/main/java/io/trino/client/StatementClientV1.java index 
94a309fbf267..29552f22a4c9 100644 --- a/client/trino-client/src/main/java/io/trino/client/StatementClientV1.java +++ b/client/trino-client/src/main/java/io/trino/client/StatementClientV1.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.io.InterruptedIOException; +import java.io.UncheckedIOException; import java.net.ProtocolException; import java.net.SocketTimeoutException; import java.net.URI; @@ -57,8 +58,8 @@ import static com.google.common.net.HttpHeaders.ACCEPT_ENCODING; import static com.google.common.net.HttpHeaders.USER_AGENT; import static io.trino.client.HttpStatusCodes.shouldRetry; -import static io.trino.client.JsonCodec.jsonCodec; import static io.trino.client.ProtocolHeaders.TRINO_HEADERS; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static java.lang.String.format; import static java.net.HttpURLConnection.HTTP_OK; import static java.net.HttpURLConnection.HTTP_UNAUTHORIZED; @@ -71,7 +72,7 @@ class StatementClientV1 implements StatementClient { private static final MediaType MEDIA_TYPE_TEXT = MediaType.parse("text/plain; charset=utf-8"); - private static final JsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); + private static final TrinoJsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); private static final Splitter COLLECTION_HEADER_SPLITTER = Splitter.on('=').limit(2).trimResults(); private static final String USER_AGENT_VALUE = StatementClientV1.class.getSimpleName() + @@ -560,6 +561,15 @@ public void close() httpDelete(uri); } } + + // Close rows - this will close the underlying iterators, + // releasing all resources and pruning remote segments + try { + currentRows.get().close(); + } + catch (IOException e) { + throw new UncheckedIOException(e); + } } private void httpDelete(URI uri) diff --git a/client/trino-client/src/main/java/io/trino/client/JsonCodec.java b/client/trino-client/src/main/java/io/trino/client/TrinoJsonCodec.java similarity index 95% rename from 
client/trino-client/src/main/java/io/trino/client/JsonCodec.java rename to client/trino-client/src/main/java/io/trino/client/TrinoJsonCodec.java index c043388a8742..17db447d0f8b 100644 --- a/client/trino-client/src/main/java/io/trino/client/JsonCodec.java +++ b/client/trino-client/src/main/java/io/trino/client/TrinoJsonCodec.java @@ -34,7 +34,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; -public class JsonCodec +public class TrinoJsonCodec { // copy of https://github.com/airlift/airlift/blob/master/json/src/main/java/io/airlift/json/ObjectMapperProvider.java static final Supplier OBJECT_MAPPER_SUPPLIER = () -> { @@ -64,16 +64,16 @@ public class JsonCodec .build(); }; - public static JsonCodec jsonCodec(Class type) + public static TrinoJsonCodec jsonCodec(Class type) { - return new JsonCodec<>(OBJECT_MAPPER_SUPPLIER.get(), type); + return new TrinoJsonCodec<>(OBJECT_MAPPER_SUPPLIER.get(), type); } private final ObjectMapper mapper; private final Type type; private final JavaType javaType; - private JsonCodec(ObjectMapper mapper, Type type) + private TrinoJsonCodec(ObjectMapper mapper, Type type) { this.mapper = requireNonNull(mapper, "mapper is null"); this.type = requireNonNull(type, "type is null"); diff --git a/client/trino-client/src/main/java/io/trino/client/auth/external/HttpTokenPoller.java b/client/trino-client/src/main/java/io/trino/client/auth/external/HttpTokenPoller.java index 243ee81fa669..d5d217295221 100644 --- a/client/trino-client/src/main/java/io/trino/client/auth/external/HttpTokenPoller.java +++ b/client/trino-client/src/main/java/io/trino/client/auth/external/HttpTokenPoller.java @@ -18,8 +18,8 @@ import dev.failsafe.Failsafe; import dev.failsafe.FailsafeException; import dev.failsafe.RetryPolicy; -import io.trino.client.JsonCodec; import io.trino.client.JsonResponse; +import io.trino.client.TrinoJsonCodec; import okhttp3.HttpUrl; import okhttp3.OkHttpClient; import 
okhttp3.Request; @@ -36,8 +36,8 @@ import static com.google.common.base.MoreObjects.firstNonNull; import static com.google.common.net.HttpHeaders.USER_AGENT; import static io.trino.client.HttpStatusCodes.shouldRetry; -import static io.trino.client.JsonCodec.jsonCodec; import static io.trino.client.JsonResponse.execute; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static java.lang.String.format; import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; import static java.net.HttpURLConnection.HTTP_OK; @@ -47,7 +47,7 @@ public class HttpTokenPoller implements TokenPoller { - private static final JsonCodec TOKEN_POLL_CODEC = jsonCodec(TokenPollRepresentation.class); + private static final TrinoJsonCodec TOKEN_POLL_CODEC = jsonCodec(TokenPollRepresentation.class); private static final String USER_AGENT_VALUE = "TrinoTokenPoller/" + firstNonNull(HttpTokenPoller.class.getPackage().getImplementationVersion(), "unknown"); diff --git a/client/trino-client/src/main/java/io/trino/client/spooling/DataAttribute.java b/client/trino-client/src/main/java/io/trino/client/spooling/DataAttribute.java index 8e519955ed6f..69d8045934eb 100644 --- a/client/trino-client/src/main/java/io/trino/client/spooling/DataAttribute.java +++ b/client/trino-client/src/main/java/io/trino/client/spooling/DataAttribute.java @@ -28,7 +28,9 @@ public enum DataAttribute // Size of the segment in bytes after decompression, added only to compressed segments UNCOMPRESSED_SIZE("uncompressedSize", Integer.class), // Placeholder for future encoder-specific schema - SCHEMA("schema", String.class); + SCHEMA("schema", String.class), + // Segment expiration time + EXPIRES_AT("expiresAt", String.class); private final String name; private final Class javaClass; diff --git a/client/trino-client/src/main/java/io/trino/client/spooling/InlineSegmentIterator.java b/client/trino-client/src/main/java/io/trino/client/spooling/InlineSegmentIterator.java new file mode 100644 index 
000000000000..b603185bd78a --- /dev/null +++ b/client/trino-client/src/main/java/io/trino/client/spooling/InlineSegmentIterator.java @@ -0,0 +1,69 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.client.spooling; + +import com.google.common.collect.AbstractIterator; +import io.trino.client.CloseableIterator; +import io.trino.client.QueryDataDecoder; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; + +import static java.util.Objects.requireNonNull; + +// Accessible through the InlineSegment.toIterator +class InlineSegmentIterator + extends AbstractIterator> + implements CloseableIterator> +{ + private InlineSegment segment; + private final QueryDataDecoder decoder; + private CloseableIterator> iterator; + + public InlineSegmentIterator(InlineSegment segment, QueryDataDecoder decoder) + { + this.segment = requireNonNull(segment, "segment is null"); + this.decoder = requireNonNull(decoder, "decoder is null"); + } + + @Override + protected List computeNext() + { + if (iterator == null) { + try { + iterator = decoder.decode(new ByteArrayInputStream(segment.getData()), segment.getMetadata()); + segment = null; + } + catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + if (iterator.hasNext()) { + return iterator.next(); + } + return endOfData(); + } + + @Override + public void close() + throws IOException + { + if (iterator != null) { + 
iterator.close(); + } + } +} diff --git a/client/trino-client/src/main/java/io/trino/client/spooling/SegmentsIterator.java b/client/trino-client/src/main/java/io/trino/client/spooling/SegmentsIterator.java new file mode 100644 index 000000000000..3ec4cadd58cc --- /dev/null +++ b/client/trino-client/src/main/java/io/trino/client/spooling/SegmentsIterator.java @@ -0,0 +1,120 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.client.spooling; + +import com.google.common.collect.AbstractIterator; +import io.trino.client.CloseableIterator; +import io.trino.client.QueryDataDecoder; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.List; + +import static com.google.common.base.Verify.verify; +import static java.util.Objects.requireNonNull; + +public class SegmentsIterator + extends AbstractIterator> + implements CloseableIterator> +{ + private final SegmentLoader loader; + private final QueryDataDecoder decoder; + private final Deque remainingSegments; + private CloseableIterator> currentIterator; + + public SegmentsIterator(SegmentLoader loader, QueryDataDecoder decoder, List segments) + { + verify(!segments.isEmpty(), "Expected at least a single segment to iterate over"); + this.loader = requireNonNull(loader, "loader is null"); + this.decoder = requireNonNull(decoder, "decoder is null"); + this.remainingSegments = new ArrayDeque<>(segments); + this.currentIterator = 
iterate(this.remainingSegments.removeFirst()); + } + + @Override + protected List computeNext() + { + if (currentIterator.hasNext()) { + return currentIterator.next(); + } + + // Current iterator exhausted, move to next one + if (moveToNextIterator()) { + verify(currentIterator.hasNext(), "New current iterator is empty"); + return currentIterator.next(); + } + + return endOfData(); + } + + private boolean moveToNextIterator() + { + if (!remainingSegments.isEmpty()) { + Segment segment = remainingSegments.removeFirst(); + currentIterator = iterate(segment); + return true; + } + else { + return false; + } + } + + private CloseableIterator> iterate(Segment segment) + { + if (segment instanceof InlineSegment) { + return new InlineSegmentIterator((InlineSegment) segment, decoder); + } + + if (segment instanceof SpooledSegment) { + return new SpooledSegmentIterator((SpooledSegment) segment, loader, decoder); + } + + throw new UnsupportedOperationException("Unsupported segment type: " + segment.getClass().getName()); + } + + @Override + public void close() + throws IOException + { + IOException exception = new IOException("Could not close all segments"); + if (currentIterator != null) { + try { + currentIterator.close(); + } + catch (IOException e) { + exception.addSuppressed(e); + } + } + + for (Segment segment : remainingSegments) { + try { + iterate(segment).close(); + } + catch (IOException e) { + exception.addSuppressed(e); + } + } + + if (exception.getSuppressed().length > 0) { + throw exception; + } + } + + @Override + public String toString() + { + return "SegmentsIterator{currentIterator=" + currentIterator + ", remainingSegments=" + remainingSegments + "}"; + } +} diff --git a/client/trino-client/src/main/java/io/trino/client/spooling/SpooledSegmentIterator.java b/client/trino-client/src/main/java/io/trino/client/spooling/SpooledSegmentIterator.java new file mode 100644 index 000000000000..d70afebae497 --- /dev/null +++ 
b/client/trino-client/src/main/java/io/trino/client/spooling/SpooledSegmentIterator.java @@ -0,0 +1,136 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.client.spooling; + +import com.google.common.collect.AbstractIterator; +import com.google.common.io.Closer; +import io.trino.client.CloseableIterator; +import io.trino.client.QueryDataDecoder; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; + +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.base.Verify.verify; +import static java.util.Objects.requireNonNull; + +// Accessible through the SpooledSegment.toIterator +class SpooledSegmentIterator + extends AbstractIterator> + implements CloseableIterator> +{ + private final SpooledSegment segment; + private final long rowsCount; + private final SegmentLoader loader; + private final QueryDataDecoder decoder; + private long currentRow; + private boolean loaded; + private boolean closed; + private Iterator> iterator; + private Closer closer = Closer.create(); + + public SpooledSegmentIterator(SpooledSegment spooledSegment, SegmentLoader loader, QueryDataDecoder decoder) + { + this.segment = requireNonNull(spooledSegment, "spooledSegment is null"); + this.rowsCount = spooledSegment.getRowsCount(); + this.loader = requireNonNull(loader, "loader is 
null"); + this.decoder = requireNonNull(decoder, "decoder is null"); + + closer.register(() -> loader.acknowledge(segment)); // acknowledge segment when closed + } + + public void load() + { + checkState(!closed, "Iterator is already closed"); + checkState(!loaded, "Iterator is already loaded"); + + checkState(iterator == null, "Iterator should be unloaded"); + try { + InputStream stream = closer.register(loader.load(segment)); // close stream when exhausted + iterator = decoder.decode(stream, segment.getMetadata()); + loaded = true; + } + catch (IOException e) { + closed = true; + throw new UncheckedIOException(e); + } + } + + public void unload() + { + checkState(!closed, "Iterator is already closed"); + closed = true; + try { + closer.close(); + iterator = null; + } + catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public long remaining() + { + return rowsCount - currentRow; + } + + @Override + protected List computeNext() + { + if (!loaded) { + load(); + } + + if (++currentRow > rowsCount) { + return endOfData(); + } + + if (closed) { + throw new NoSuchElementException(); + } + + try { + verify(iterator.hasNext(), "Iterator should have more rows, current: %s, count: %s", currentRow, rowsCount); + List rows = iterator.next(); + if (currentRow == this.rowsCount) { + unload(); // Unload when the last row was fetched + } + return rows; + } + catch (Exception e) { + // Cleanup if decoding has failed + unload(); + throw e; + } + } + + @Override + public void close() + throws IOException + { + if (!closed) { + unload(); + } + } + + @Override + public String toString() + { + return "SpooledSegmentIterator{segment=" + segment + "}"; + } +} diff --git a/client/trino-client/src/main/java/io/trino/client/spooling/encoding/CompressedQueryDataDecoder.java b/client/trino-client/src/main/java/io/trino/client/spooling/encoding/CompressedQueryDataDecoder.java index ce2fe561d5ad..681f8d06f2ab 100644 --- 
a/client/trino-client/src/main/java/io/trino/client/spooling/encoding/CompressedQueryDataDecoder.java +++ b/client/trino-client/src/main/java/io/trino/client/spooling/encoding/CompressedQueryDataDecoder.java @@ -14,14 +14,15 @@ package io.trino.client.spooling.encoding; import com.google.common.io.ByteStreams; +import io.trino.client.CloseableIterator; import io.trino.client.QueryDataDecoder; -import io.trino.client.ResultRows; import io.trino.client.spooling.DataAttribute; import io.trino.client.spooling.DataAttributes; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.List; import java.util.Optional; import static com.google.common.base.Verify.verify; @@ -41,7 +42,7 @@ abstract void decompress(byte[] input, byte[] output) throws IOException; @Override - public ResultRows decode(InputStream stream, DataAttributes metadata) + public CloseableIterator> decode(InputStream stream, DataAttributes metadata) throws IOException { Optional expectedDecompressedSize = metadata.getOptional(DataAttribute.UNCOMPRESSED_SIZE, Integer.class); diff --git a/client/trino-client/src/main/java/io/trino/client/spooling/encoding/JsonQueryDataDecoder.java b/client/trino-client/src/main/java/io/trino/client/spooling/encoding/JsonQueryDataDecoder.java index dcd631b0f285..11cf86167948 100644 --- a/client/trino-client/src/main/java/io/trino/client/spooling/encoding/JsonQueryDataDecoder.java +++ b/client/trino-client/src/main/java/io/trino/client/spooling/encoding/JsonQueryDataDecoder.java @@ -13,13 +13,14 @@ */ package io.trino.client.spooling.encoding; +import io.trino.client.CloseableIterator; import io.trino.client.Column; import io.trino.client.JsonDecodingUtils.TypeDecoder; -import io.trino.client.JsonResultRows; +import io.trino.client.JsonIterators; import io.trino.client.QueryDataDecoder; -import io.trino.client.ResultRows; import io.trino.client.spooling.DataAttributes; +import java.io.IOException; import 
java.io.InputStream; import java.util.List; @@ -37,9 +38,10 @@ public class JsonQueryDataDecoder } @Override - public ResultRows decode(InputStream stream, DataAttributes queryAttributes) + public CloseableIterator> decode(InputStream stream, DataAttributes queryAttributes) + throws IOException { - return JsonResultRows.forInputStream(stream, decoders); + return JsonIterators.forInputStream(stream, decoders); } @Override diff --git a/client/trino-client/src/test/java/io/trino/client/TestQueryResults.java b/client/trino-client/src/test/java/io/trino/client/TestQueryResults.java index 3dd478efcf4b..4d792e7087ee 100644 --- a/client/trino-client/src/test/java/io/trino/client/TestQueryResults.java +++ b/client/trino-client/src/test/java/io/trino/client/TestQueryResults.java @@ -18,13 +18,13 @@ import com.google.common.base.Strings; import org.junit.jupiter.api.Test; -import static io.trino.client.JsonCodec.jsonCodec; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; public class TestQueryResults { - private static final JsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); + private static final TrinoJsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); private static final String GOLDEN_VALUE = "{\n" + " \"id\" : \"20160128_214710_00012_rk68b\",\n" + diff --git a/client/trino-client/src/test/java/io/trino/client/TestResultRowsDecoder.java b/client/trino-client/src/test/java/io/trino/client/TestResultRowsDecoder.java index 8a2b6465952c..34ab51bd4cc2 100644 --- a/client/trino-client/src/test/java/io/trino/client/TestResultRowsDecoder.java +++ b/client/trino-client/src/test/java/io/trino/client/TestResultRowsDecoder.java @@ -17,6 +17,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.io.CountingInputStream; +import io.trino.client.spooling.DataAttribute; import 
io.trino.client.spooling.DataAttributes; import io.trino.client.spooling.EncodedQueryData; import io.trino.client.spooling.Segment; @@ -25,7 +26,6 @@ import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; -import java.io.FilterInputStream; import java.io.InputStream; import java.net.URI; import java.util.Arrays; @@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.IntStream; -import static io.trino.client.JsonResultRows.createJsonFactory; +import static io.trino.client.JsonIterators.createJsonFactory; import static io.trino.client.spooling.Segment.inlined; import static io.trino.client.spooling.Segment.spooled; import static java.nio.charset.StandardCharsets.UTF_8; @@ -117,7 +117,7 @@ public void testSpooledJsonMaterialization() AtomicInteger loaded = new AtomicInteger(); AtomicInteger acknowledged = new AtomicInteger(); try (ResultRowsDecoder decoder = new ResultRowsDecoder(new StaticLoader(loaded, acknowledged))) { - assertThat(eagerlyMaterialize(decoder.toRows(fromSegments(spooledSegment(), spooledSegment())))) + assertThat(eagerlyMaterialize(decoder.toRows(fromSegments(spooledSegment(2), spooledSegment(2))))) .hasSize(4) .containsExactly(ImmutableList.of(2137), ImmutableList.of(1337), ImmutableList.of(2137), ImmutableList.of(1337)); } @@ -132,7 +132,7 @@ public void testSpooledJsonNodeMaterialization() AtomicInteger loaded = new AtomicInteger(); AtomicInteger acknowledged = new AtomicInteger(); try (ResultRowsDecoder decoder = new ResultRowsDecoder(new StaticLoader(loaded, acknowledged))) { - assertThat(eagerlyMaterialize(decoder.toRows(fromSegments(spooledSegment(), spooledSegment())))) + assertThat(eagerlyMaterialize(decoder.toRows(fromSegments(spooledSegment(2), spooledSegment(2))))) .hasSize(4) .containsExactly(ImmutableList.of(2137), ImmutableList.of(1337), ImmutableList.of(2137), ImmutableList.of(1337)); } @@ -148,7 +148,7 @@ public void testSpooledJsonNodeScanningMaterialization() .reduce("[", (a, 
b) -> a + "[" + b + "],", String::concat) + "[1337]]"; CountingInputStream stream = new CountingInputStream(new ByteArrayInputStream(data.getBytes(UTF_8))); try (ResultRowsDecoder decoder = new ResultRowsDecoder(loaderFromStream(stream))) { - Iterator> iterator = decoder.toRows(fromSegments(spooledSegment())).iterator(); + Iterator> iterator = decoder.toRows(fromSegments(spooledSegment(2501))).iterator(); assertThat(stream.getCount()).isEqualTo(0); iterator.next(); assertThat(stream.getCount()).isEqualTo(8000); // Jackson reads data in 8K chunks @@ -174,7 +174,7 @@ public void testLazySpooledMaterialization() AtomicInteger loaded = new AtomicInteger(); AtomicInteger acknowledged = new AtomicInteger(); try (ResultRowsDecoder decoder = new ResultRowsDecoder(new StaticLoader(loaded, acknowledged))) { - Iterator> iterator = decoder.toRows(fromSegments(spooledSegment(), spooledSegment())) + Iterator> iterator = decoder.toRows(fromSegments(spooledSegment(2), spooledSegment(2))) .iterator(); assertThat(loaded.get()).isEqualTo(0); @@ -216,14 +216,7 @@ public StaticLoader(AtomicInteger loaded, AtomicInteger acknowledged) public InputStream load(SpooledSegment segment) { loaded.incrementAndGet(); - - return new FilterInputStream(new ByteArrayInputStream("[[2137], [1337]]".getBytes(UTF_8))) { - @Override - public void close() - { - acknowledge(segment); - } - }; + return new ByteArrayInputStream("[[2137], [1337]]".getBytes(UTF_8)); } @Override @@ -292,8 +285,12 @@ private static QueryResults fromSegments(Segment... 
segments) .build()); } - private static Segment spooledSegment() + private static Segment spooledSegment(long rows) { - return spooled(URI.create("http://localhost"), URI.create("http://localhost"), DataAttributes.empty(), ImmutableMap.of()); + DataAttributes attributes = DataAttributes.builder() + .set(DataAttribute.ROWS_COUNT, rows) + .build(); + + return spooled(URI.create("http://localhost"), URI.create("http://localhost"), attributes, ImmutableMap.of()); } } diff --git a/client/trino-client/src/test/java/io/trino/client/TestRetry.java b/client/trino-client/src/test/java/io/trino/client/TestRetry.java index 3ea7a940e665..310bce43d559 100644 --- a/client/trino-client/src/test/java/io/trino/client/TestRetry.java +++ b/client/trino-client/src/test/java/io/trino/client/TestRetry.java @@ -34,8 +34,8 @@ import static com.google.common.net.HttpHeaders.CONTENT_TYPE; import static com.google.common.net.MediaType.JSON_UTF_8; -import static io.trino.client.JsonCodec.jsonCodec; import static io.trino.client.StatementClientFactory.newStatementClient; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static io.trino.spi.type.StandardTypes.INTEGER; import static io.trino.spi.type.StandardTypes.VARCHAR; import static java.lang.String.format; @@ -48,7 +48,7 @@ public class TestRetry { private MockWebServer server; - private static final JsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); + private static final TrinoJsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class); @BeforeEach public void setup() diff --git a/client/trino-client/src/test/java/io/trino/client/TestJsonCodec.java b/client/trino-client/src/test/java/io/trino/client/TestTrinoJsonCodec.java similarity index 93% rename from client/trino-client/src/test/java/io/trino/client/TestJsonCodec.java rename to client/trino-client/src/test/java/io/trino/client/TestTrinoJsonCodec.java index 050349bbf387..9f65314120af 100644 --- 
a/client/trino-client/src/test/java/io/trino/client/TestJsonCodec.java +++ b/client/trino-client/src/test/java/io/trino/client/TestTrinoJsonCodec.java @@ -18,18 +18,18 @@ import java.io.ByteArrayInputStream; -import static io.trino.client.JsonCodec.jsonCodec; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -public class TestJsonCodec +public class TestTrinoJsonCodec { @Test public void testTrailingContent() throws Exception { - JsonCodec codec = jsonCodec(ClientTypeSignature.class); + TrinoJsonCodec codec = jsonCodec(ClientTypeSignature.class); String json = "{\"rawType\":\"bigint\",\"arguments\":[]}"; assertThat(codec.fromJson(json).getRawType()).isEqualTo("bigint"); diff --git a/client/trino-client/src/test/java/io/trino/client/spooling/encoding/TestCompressedQueryDataDecoder.java b/client/trino-client/src/test/java/io/trino/client/spooling/encoding/TestCompressedQueryDataDecoder.java index 395d9123a639..ce787a2058f3 100644 --- a/client/trino-client/src/test/java/io/trino/client/spooling/encoding/TestCompressedQueryDataDecoder.java +++ b/client/trino-client/src/test/java/io/trino/client/spooling/encoding/TestCompressedQueryDataDecoder.java @@ -15,8 +15,8 @@ import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; +import io.trino.client.CloseableIterator; import io.trino.client.QueryDataDecoder; -import io.trino.client.ResultRows; import io.trino.client.spooling.DataAttributes; import org.junit.jupiter.api.Test; @@ -27,6 +27,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import static io.trino.client.CloseableIterator.closeable; import static io.trino.client.spooling.DataAttribute.SEGMENT_SIZE; import static io.trino.client.spooling.DataAttribute.UNCOMPRESSED_SIZE; import static 
java.nio.charset.StandardCharsets.UTF_8; @@ -53,12 +54,12 @@ public void close() QueryDataDecoder decoder = new TestQueryDataDecoder(new QueryDataDecoder() { @Override - public ResultRows decode(InputStream input, DataAttributes segmentAttributes) + public CloseableIterator> decode(InputStream input, DataAttributes segmentAttributes) throws IOException { assertThat(new String(ByteStreams.toByteArray(input), UTF_8)) .isEqualTo("decompressed"); - return SAMPLE_VALUES::iterator; + return closeable(SAMPLE_VALUES.iterator()); } @Override @@ -74,6 +75,7 @@ public String encoding() .set(UNCOMPRESSED_SIZE, "decompressed".length()) .set(SEGMENT_SIZE, "compressed".length()) .build())) + .toIterable() .containsAll(SAMPLE_VALUES); assertThat(closed.get()).isTrue(); } @@ -95,13 +97,13 @@ public void close() QueryDataDecoder decoder = new TestQueryDataDecoder(new QueryDataDecoder() { @Override - public ResultRows decode(InputStream input, DataAttributes segmentAttributes) + public CloseableIterator> decode(InputStream input, DataAttributes segmentAttributes) throws IOException { assertThat(new String(ByteStreams.toByteArray(input), UTF_8)) .isEqualTo("not compressed"); input.close(); // Closes input stream according to the contract - return SAMPLE_VALUES::iterator; + return closeable(SAMPLE_VALUES.iterator()); } @Override @@ -115,6 +117,7 @@ public String encoding() assertThat(decoder.decode(stream, DataAttributes.builder() .set(SEGMENT_SIZE, "not compressed".length()) .build())) + .toIterable() .containsAll(SAMPLE_VALUES); assertThat(closed.get()).isTrue(); } diff --git a/client/trino-jdbc/pom.xml b/client/trino-jdbc/pom.xml index 59be60756a3f..d873ec2ca789 100644 --- a/client/trino-jdbc/pom.xml +++ b/client/trino-jdbc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/client/trino-jdbc/src/test/java/io/trino/jdbc/TestAsyncResultIterator.java b/client/trino-jdbc/src/test/java/io/trino/jdbc/TestAsyncResultIterator.java index 
963c782f934a..cf260df3bbbe 100644 --- a/client/trino-jdbc/src/test/java/io/trino/jdbc/TestAsyncResultIterator.java +++ b/client/trino-jdbc/src/test/java/io/trino/jdbc/TestAsyncResultIterator.java @@ -29,6 +29,7 @@ import java.net.URI; import java.time.ZoneId; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; @@ -41,7 +42,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -import static io.trino.client.ResultRows.fromIterableRows; import static java.util.Objects.requireNonNull; import static org.assertj.core.api.Assertions.assertThat; @@ -64,7 +64,7 @@ public void testIteratorCancelWhenQueueNotFull() catch (InterruptedException e) { interruptedButSwallowedLatch.countDown(); } - return fromIterableRows(ImmutableList.of(ImmutableList.of(new Object()))); + return fromList(ImmutableList.of(ImmutableList.of(new Object()))); }), ignored -> {}, new WarningsManager(), Optional.of(new ArrayBlockingQueue<>(100))); @@ -93,7 +93,7 @@ public void testIteratorCancelWhenQueueIsFull() AsyncResultIterator iterator = new AsyncResultIterator( new MockStatementClient(() -> { thread.compareAndSet(null, Thread.currentThread()); - return fromIterableRows(ImmutableList.of(ImmutableList.of(new Object()))); + return fromList(ImmutableList.of(ImmutableList.of(new Object()))); }), ignored -> {}, new WarningsManager(), Optional.of(queue)); @@ -371,4 +371,24 @@ public Long getUpdateCount() } }; } + + static ResultRows fromList(List> values) + { + return new ResultRows() { + @Override + public void close() {} + + @Override + public Iterator> iterator() + { + return values.iterator(); + } + + @Override + public String toString() + { + return "ResultRows{values=" + values + "}"; + } + }; + } } diff --git a/core/trino-grammar/pom.xml b/core/trino-grammar/pom.xml index 9b2ed10f7d1f..417fd328c287 100644 --- a/core/trino-grammar/pom.xml +++ b/core/trino-grammar/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 
470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-grammar/src/main/antlr4/io/trino/grammar/sql/SqlBase.g4 b/core/trino-grammar/src/main/antlr4/io/trino/grammar/sql/SqlBase.g4 index d716ee538376..c11f6b901dfd 100644 --- a/core/trino-grammar/src/main/antlr4/io/trino/grammar/sql/SqlBase.g4 +++ b/core/trino-grammar/src/main/antlr4/io/trino/grammar/sql/SqlBase.g4 @@ -200,11 +200,13 @@ statement ; rootQuery - : withFunction? query + : (WITH SESSION sessionProperty (',' sessionProperty)*)? + (WITH functionSpecification (',' functionSpecification)*)? + query ; -withFunction - : WITH functionSpecification (',' functionSpecification)* +sessionProperty + : qualifiedName EQ expression ; query diff --git a/core/trino-main/pom.xml b/core/trino-main/pom.xml index 6fa1293393ed..33c485267766 100644 --- a/core/trino-main/pom.xml +++ b/core/trino-main/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-main/src/main/java/io/trino/FeaturesConfig.java b/core/trino-main/src/main/java/io/trino/FeaturesConfig.java index 707b3300e8ca..4e32436a250e 100644 --- a/core/trino-main/src/main/java/io/trino/FeaturesConfig.java +++ b/core/trino-main/src/main/java/io/trino/FeaturesConfig.java @@ -21,6 +21,7 @@ import io.airlift.configuration.LegacyConfig; import io.airlift.units.DataSize; import io.airlift.units.MaxDataSize; +import io.trino.execution.ThreadCountParser; import io.trino.execution.buffer.CompressionCodec; import io.trino.sql.analyzer.RegexLibrary; import jakarta.validation.constraints.DecimalMax; @@ -290,9 +291,9 @@ public int getSpillerThreads() } @Config("spiller-threads") - public FeaturesConfig setSpillerThreads(int spillerThreads) + public FeaturesConfig setSpillerThreads(String spillerThreads) { - this.spillerThreads = spillerThreads; + this.spillerThreads = ThreadCountParser.DEFAULT.parse(spillerThreads); return this; } diff --git a/core/trino-main/src/main/java/io/trino/Session.java 
b/core/trino-main/src/main/java/io/trino/Session.java index 037cf5147e24..7841c53cb5b6 100644 --- a/core/trino-main/src/main/java/io/trino/Session.java +++ b/core/trino-main/src/main/java/io/trino/Session.java @@ -206,7 +206,10 @@ public SqlPath getPath() public TimeZoneKey getTimeZoneKey() { - return timeZoneKey; + // Allow overriding timezone key with a session property regardless of it's source + return SystemSessionProperties.getTimeZoneId(this) + .map(TimeZoneKey::getTimeZoneKey) + .orElse(timeZoneKey); } public Locale getLocale() @@ -416,6 +419,11 @@ public Session withDefaultProperties(Map systemPropertyDefaults, .putAll(catalogEntry.getValue()); } + return withProperties(systemProperties, catalogProperties); + } + + public Session withProperties(Map systemProperties, Map> catalogProperties) + { return new Session( queryId, querySpan, diff --git a/core/trino-main/src/main/java/io/trino/dispatcher/DispatchManager.java b/core/trino-main/src/main/java/io/trino/dispatcher/DispatchManager.java index f49114735147..0840a9812e6d 100644 --- a/core/trino-main/src/main/java/io/trino/dispatcher/DispatchManager.java +++ b/core/trino-main/src/main/java/io/trino/dispatcher/DispatchManager.java @@ -119,7 +119,7 @@ public DispatchManager( this.accessControl = requireNonNull(accessControl, "accessControl is null"); this.sessionSupplier = requireNonNull(sessionSupplier, "sessionSupplier is null"); this.sessionPropertyDefaults = requireNonNull(sessionPropertyDefaults, "sessionPropertyDefaults is null"); - this.sessionPropertyManager = sessionPropertyManager; + this.sessionPropertyManager = requireNonNull(sessionPropertyManager, "sessionPropertyManager is null"); this.tracer = requireNonNull(tracer, "tracer is null"); this.maxQueryLength = queryManagerConfig.getMaxQueryLength(); @@ -347,6 +347,14 @@ public long getProgressingQueries() .count(); } + @Managed + public long getFullyBlockedQueries() + { + return queryTracker.getAllQueries().stream() + .filter(query -> 
query.getState() == RUNNING && query.getBasicQueryInfo().getQueryStats().isFullyBlocked()) + .count(); + } + public boolean isQueryRegistered(QueryId queryId) { return queryTracker.hasQuery(queryId); diff --git a/core/trino-main/src/main/java/io/trino/dispatcher/LocalDispatchQueryFactory.java b/core/trino-main/src/main/java/io/trino/dispatcher/LocalDispatchQueryFactory.java index 5205b1268a9e..a1c7874b3ba8 100644 --- a/core/trino-main/src/main/java/io/trino/dispatcher/LocalDispatchQueryFactory.java +++ b/core/trino-main/src/main/java/io/trino/dispatcher/LocalDispatchQueryFactory.java @@ -38,6 +38,7 @@ import io.trino.server.protocol.Slug; import io.trino.spi.TrinoException; import io.trino.spi.resourcegroups.ResourceGroupId; +import io.trino.sql.SessionPropertyResolver; import io.trino.sql.tree.Statement; import io.trino.transaction.TransactionId; import io.trino.transaction.TransactionManager; @@ -59,6 +60,7 @@ public class LocalDispatchQueryFactory private final TransactionManager transactionManager; private final AccessControl accessControl; private final Metadata metadata; + private final SessionPropertyResolver sessionPropertyResolver; private final QueryMonitor queryMonitor; private final LocationFactory locationFactory; @@ -77,6 +79,7 @@ public LocalDispatchQueryFactory( QueryManager queryManager, QueryManagerConfig queryManagerConfig, TransactionManager transactionManager, + SessionPropertyResolver sessionPropertyResolver, AccessControl accessControl, Metadata metadata, QueryMonitor queryMonitor, @@ -92,6 +95,7 @@ public LocalDispatchQueryFactory( this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); this.accessControl = requireNonNull(accessControl, "accessControl is null"); this.metadata = requireNonNull(metadata, "metadata is null"); + this.sessionPropertyResolver = requireNonNull(sessionPropertyResolver, "sessionPropertyInterpreter is null"); this.queryMonitor = requireNonNull(queryMonitor, "queryMonitor is null"); 
this.locationFactory = requireNonNull(locationFactory, "locationFactory is null"); this.executionFactories = requireNonNull(executionFactories, "executionFactories is null"); @@ -132,6 +136,7 @@ public DispatchQuery createDispatchQuery( planOptimizersStatsCollector, getQueryType(preparedQuery.getStatement()), faultTolerantExecutionExchangeEncryptionEnabled, + Optional.of(sessionPropertyResolver.getSessionPropertiesApplier(preparedQuery)), version); // It is important that `queryCreatedEvent` is called here. Moving it past the `executor.submit` below diff --git a/core/trino-main/src/main/java/io/trino/execution/DistributionSnapshot.java b/core/trino-main/src/main/java/io/trino/execution/DistributionSnapshot.java index 5323c5f08a54..429ff6876186 100644 --- a/core/trino-main/src/main/java/io/trino/execution/DistributionSnapshot.java +++ b/core/trino-main/src/main/java/io/trino/execution/DistributionSnapshot.java @@ -110,15 +110,15 @@ public DistributionSnapshot(Distribution distribution) distribution.getTotal(), distribution.getMin(), distribution.getMax(), - distribution.getPercentile(0.01), - distribution.getPercentile(0.05), - distribution.getPercentile(0.10), - distribution.getPercentile(0.25), - distribution.getPercentile(0.50), - distribution.getPercentile(0.75), - distribution.getPercentile(0.90), - distribution.getPercentile(0.95), - distribution.getPercentile(0.99)); + distribution.getPercentile(1), + distribution.getPercentile(5), + distribution.getPercentile(10), + distribution.getPercentile(25), + distribution.getPercentile(50), + distribution.getPercentile(75), + distribution.getPercentile(90), + distribution.getPercentile(95), + distribution.getPercentile(99)); } @Override diff --git a/core/trino-main/src/main/java/io/trino/execution/QueryManagerConfig.java b/core/trino-main/src/main/java/io/trino/execution/QueryManagerConfig.java index 135e08b6e382..37d2f418bf5c 100644 --- a/core/trino-main/src/main/java/io/trino/execution/QueryManagerConfig.java +++ 
b/core/trino-main/src/main/java/io/trino/execution/QueryManagerConfig.java @@ -379,9 +379,9 @@ public int getQueryManagerExecutorPoolSize() } @Config("query.manager-executor-pool-size") - public QueryManagerConfig setQueryManagerExecutorPoolSize(int queryManagerExecutorPoolSize) + public QueryManagerConfig setQueryManagerExecutorPoolSize(String queryManagerExecutorPoolSize) { - this.queryManagerExecutorPoolSize = queryManagerExecutorPoolSize; + this.queryManagerExecutorPoolSize = ThreadCountParser.DEFAULT.parse(queryManagerExecutorPoolSize); return this; } @@ -392,9 +392,9 @@ public int getQueryExecutorPoolSize() } @Config("query.executor-pool-size") - public QueryManagerConfig setQueryExecutorPoolSize(int queryExecutorPoolSize) + public QueryManagerConfig setQueryExecutorPoolSize(String queryExecutorPoolSize) { - this.queryExecutorPoolSize = queryExecutorPoolSize; + this.queryExecutorPoolSize = ThreadCountParser.DEFAULT.parse(queryExecutorPoolSize); return this; } @@ -406,9 +406,9 @@ public int getMaxStateMachineCallbackThreads() @Config("query.max-state-machine-callback-threads") @ConfigDescription("The maximum number of threads allowed to run query and stage state machine listener callbacks concurrently for each query") - public QueryManagerConfig setMaxStateMachineCallbackThreads(int maxStateMachineCallbackThreads) + public QueryManagerConfig setMaxStateMachineCallbackThreads(String maxStateMachineCallbackThreads) { - this.maxStateMachineCallbackThreads = maxStateMachineCallbackThreads; + this.maxStateMachineCallbackThreads = ThreadCountParser.DEFAULT.parse(maxStateMachineCallbackThreads); return this; } @@ -420,9 +420,9 @@ public int getMaxSplitManagerCallbackThreads() @Config("query.max-split-manager-callback-threads") @ConfigDescription("The maximum number of threads allowed to run splits generation callbacks concurrently") - public QueryManagerConfig setMaxSplitManagerCallbackThreads(int maxSplitManagerCallbackThreads) + public QueryManagerConfig 
setMaxSplitManagerCallbackThreads(String maxSplitManagerCallbackThreads) { - this.maxSplitManagerCallbackThreads = maxSplitManagerCallbackThreads; + this.maxSplitManagerCallbackThreads = ThreadCountParser.DEFAULT.parse(maxSplitManagerCallbackThreads); return this; } @@ -526,9 +526,9 @@ public int getDispatcherQueryPoolSize() } @Config("query.dispatcher-query-pool-size") - public QueryManagerConfig setDispatcherQueryPoolSize(int dispatcherQueryPoolSize) + public QueryManagerConfig setDispatcherQueryPoolSize(String dispatcherQueryPoolSize) { - this.dispatcherQueryPoolSize = dispatcherQueryPoolSize; + this.dispatcherQueryPoolSize = ThreadCountParser.DEFAULT.parse(dispatcherQueryPoolSize); return this; } @@ -539,9 +539,9 @@ public int getRemoteTaskMaxCallbackThreads() } @Config("query.remote-task.max-callback-threads") - public QueryManagerConfig setRemoteTaskMaxCallbackThreads(int remoteTaskMaxCallbackThreads) + public QueryManagerConfig setRemoteTaskMaxCallbackThreads(String remoteTaskMaxCallbackThreads) { - this.remoteTaskMaxCallbackThreads = remoteTaskMaxCallbackThreads; + this.remoteTaskMaxCallbackThreads = ThreadCountParser.DEFAULT.parse(remoteTaskMaxCallbackThreads); return this; } diff --git a/core/trino-main/src/main/java/io/trino/execution/QueryStateMachine.java b/core/trino-main/src/main/java/io/trino/execution/QueryStateMachine.java index 7e84de77b97d..0e23c80b5ca3 100644 --- a/core/trino-main/src/main/java/io/trino/execution/QueryStateMachine.java +++ b/core/trino-main/src/main/java/io/trino/execution/QueryStateMachine.java @@ -53,6 +53,7 @@ import io.trino.spi.resourcegroups.ResourceGroupId; import io.trino.spi.security.SelectedRole; import io.trino.spi.type.Type; +import io.trino.sql.SessionPropertyResolver.SessionPropertiesApplier; import io.trino.sql.analyzer.Output; import io.trino.sql.planner.PlanFragment; import io.trino.tracing.TrinoAttributes; @@ -245,6 +246,7 @@ public static QueryStateMachine begin( PlanOptimizersStatsCollector 
queryStatsCollector, Optional queryType, boolean faultTolerantExecutionExchangeEncryptionEnabled, + Optional sessionPropertiesApplier, NodeVersion version) { return beginWithTicker( @@ -264,6 +266,7 @@ public static QueryStateMachine begin( queryStatsCollector, queryType, faultTolerantExecutionExchangeEncryptionEnabled, + sessionPropertiesApplier, version); } @@ -284,6 +287,7 @@ static QueryStateMachine beginWithTicker( PlanOptimizersStatsCollector queryStatsCollector, Optional queryType, boolean faultTolerantExecutionExchangeEncryptionEnabled, + Optional sessionPropertiesApplier, NodeVersion version) { // if there is an existing transaction, activate it @@ -314,6 +318,11 @@ static QueryStateMachine beginWithTicker( session = session.withoutSpooling(); } + // Apply WITH SESSION properties which require transaction to be started to resolve catalog handles + if (sessionPropertiesApplier.isPresent()) { + session = sessionPropertiesApplier.orElseThrow().apply(session); + } + Span querySpan = session.getQuerySpan(); querySpan.setAttribute(TrinoAttributes.QUERY_TYPE, queryType.map(Enum::name).orElse("UNKNOWN")); diff --git a/core/trino-main/src/main/java/io/trino/execution/SessionPropertyEvaluator.java b/core/trino-main/src/main/java/io/trino/execution/SessionPropertyEvaluator.java new file mode 100644 index 000000000000..22572111d208 --- /dev/null +++ b/core/trino-main/src/main/java/io/trino/execution/SessionPropertyEvaluator.java @@ -0,0 +1,113 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.execution; + +import com.google.inject.Inject; +import io.trino.Session; +import io.trino.metadata.SessionPropertyManager; +import io.trino.security.AccessControl; +import io.trino.spi.TrinoException; +import io.trino.spi.connector.CatalogHandle; +import io.trino.spi.session.PropertyMetadata; +import io.trino.spi.type.TimeZoneKey; +import io.trino.spi.type.Type; +import io.trino.sql.PlannerContext; +import io.trino.sql.SqlEnvironmentConfig; +import io.trino.sql.tree.Expression; +import io.trino.sql.tree.NodeRef; +import io.trino.sql.tree.Parameter; +import io.trino.sql.tree.QualifiedName; + +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static io.trino.SystemSessionProperties.TIME_ZONE_ID; +import static io.trino.metadata.MetadataUtil.getRequiredCatalogHandle; +import static io.trino.metadata.SessionPropertyManager.evaluatePropertyValue; +import static io.trino.metadata.SessionPropertyManager.serializeSessionProperty; +import static io.trino.spi.StandardErrorCode.INVALID_SESSION_PROPERTY; +import static io.trino.sql.analyzer.SemanticExceptions.semanticException; +import static java.lang.String.format; +import static java.util.Objects.requireNonNull; + +public class SessionPropertyEvaluator +{ + private final PlannerContext plannerContext; + private final AccessControl accessControl; + private final SessionPropertyManager sessionPropertyManager; + private final Optional forcedSessionTimeZone; + + @Inject + public SessionPropertyEvaluator(PlannerContext plannerContext, AccessControl accessControl, SessionPropertyManager sessionPropertyManager, SqlEnvironmentConfig environmentConfig) + { + this.plannerContext = requireNonNull(plannerContext, "plannerContext is null"); + this.accessControl = requireNonNull(accessControl, "accessControl is null"); + this.sessionPropertyManager = 
requireNonNull(sessionPropertyManager, "sessionPropertyManager is null"); + this.forcedSessionTimeZone = requireNonNull(environmentConfig, "environmentConfig is null").getForcedSessionTimeZone(); + } + + public String evaluate(Session session, QualifiedName name, Expression expression, Map, Expression> parameters) + { + List nameParts = name.getParts(); + if (nameParts.size() == 1) { + PropertyMetadata systemPropertyMetadata = sessionPropertyManager.getSystemSessionPropertyMetadata(nameParts.getFirst()) + .orElseThrow(() -> semanticException(INVALID_SESSION_PROPERTY, expression, "Session property %s does not exist", name)); + + return evaluate(session, name, expression, parameters, systemPropertyMetadata); + } + else if (nameParts.size() == 2) { + String catalogName = nameParts.getFirst(); + String propertyName = nameParts.getLast(); + + CatalogHandle catalogHandle = getRequiredCatalogHandle(plannerContext.getMetadata(), session, expression, catalogName); + PropertyMetadata connectorPropertyMetadata = sessionPropertyManager.getConnectorSessionPropertyMetadata(catalogHandle, propertyName) + .orElseThrow(() -> semanticException(INVALID_SESSION_PROPERTY, expression, "Session property %s does not exist", name)); + + return evaluate(session, name, expression, parameters, connectorPropertyMetadata); + } + throw semanticException(INVALID_SESSION_PROPERTY, expression, "Invalid session property '%s'", name); + } + + private String evaluate(Session session, QualifiedName name, Expression expression, Map, Expression> parameters, PropertyMetadata propertyMetadata) + { + if (propertyMetadata.getName().equals(TIME_ZONE_ID) && forcedSessionTimeZone.isPresent()) { + return serializeSessionProperty(propertyMetadata.getSqlType(), forcedSessionTimeZone.get().toString()); + } + + Type type = propertyMetadata.getSqlType(); + Object objectValue; + + try { + objectValue = evaluatePropertyValue(expression, type, session, plannerContext, accessControl, parameters); + } + catch 
(TrinoException e) { + throw new TrinoException( + INVALID_SESSION_PROPERTY, + format("Unable to set session property '%s' to '%s': %s", name, expression, e.getRawMessage())); + } + + String value = serializeSessionProperty(type, objectValue); + + try { + // verify the SQL value can be decoded by the property + propertyMetadata.decode(objectValue); + } + catch (RuntimeException e) { + throw semanticException(INVALID_SESSION_PROPERTY, expression, "%s", e.getMessage()); + } + + return value; + } +} diff --git a/core/trino-main/src/main/java/io/trino/execution/SetSessionTask.java b/core/trino-main/src/main/java/io/trino/execution/SetSessionTask.java index 27d04a46a49f..f62f5fe23a5b 100644 --- a/core/trino-main/src/main/java/io/trino/execution/SetSessionTask.java +++ b/core/trino-main/src/main/java/io/trino/execution/SetSessionTask.java @@ -17,14 +17,9 @@ import com.google.inject.Inject; import io.trino.Session; import io.trino.execution.warnings.WarningCollector; -import io.trino.metadata.SessionPropertyManager; import io.trino.security.AccessControl; import io.trino.security.SecurityContext; import io.trino.spi.TrinoException; -import io.trino.spi.connector.CatalogHandle; -import io.trino.spi.session.PropertyMetadata; -import io.trino.spi.type.Type; -import io.trino.sql.PlannerContext; import io.trino.sql.tree.Expression; import io.trino.sql.tree.QualifiedName; import io.trino.sql.tree.SetSession; @@ -33,27 +28,20 @@ import static com.google.common.util.concurrent.Futures.immediateVoidFuture; import static io.trino.execution.ParameterExtractor.bindParameters; -import static io.trino.metadata.MetadataUtil.getRequiredCatalogHandle; -import static io.trino.metadata.SessionPropertyManager.evaluatePropertyValue; -import static io.trino.metadata.SessionPropertyManager.serializeSessionProperty; import static io.trino.spi.StandardErrorCode.INVALID_SESSION_PROPERTY; -import static io.trino.sql.analyzer.SemanticExceptions.semanticException; -import static 
java.lang.String.format; import static java.util.Objects.requireNonNull; public class SetSessionTask implements DataDefinitionTask { - private final PlannerContext plannerContext; + private final SessionPropertyEvaluator sessionEvaluator; private final AccessControl accessControl; - private final SessionPropertyManager sessionPropertyManager; @Inject - public SetSessionTask(PlannerContext plannerContext, AccessControl accessControl, SessionPropertyManager sessionPropertyManager) + public SetSessionTask(SessionPropertyEvaluator sessionPropertyEvaluator, AccessControl accessControl) { - this.plannerContext = requireNonNull(plannerContext, "plannerContext is null"); + this.sessionEvaluator = requireNonNull(sessionPropertyEvaluator, "sessionEvaluator is null"); this.accessControl = requireNonNull(accessControl, "accessControl is null"); - this.sessionPropertyManager = requireNonNull(sessionPropertyManager, "sessionPropertyManager is null"); } @Override @@ -69,50 +57,23 @@ public ListenableFuture execute( List parameters, WarningCollector warningCollector) { - Session session = stateMachine.getSession(); QualifiedName propertyName = statement.getName(); List parts = propertyName.getParts(); - if (parts.size() > 2) { - throw semanticException(INVALID_SESSION_PROPERTY, statement, "Invalid session property '%s'", propertyName); - } + Session session = stateMachine.getSession(); - // validate the property name - PropertyMetadata propertyMetadata; if (parts.size() == 1) { - accessControl.checkCanSetSystemSessionProperty(session.getIdentity(), session.getQueryId(), parts.get(0)); - propertyMetadata = sessionPropertyManager.getSystemSessionPropertyMetadata(parts.get(0)) - .orElseThrow(() -> semanticException(INVALID_SESSION_PROPERTY, statement, "Session property '%s' does not exist", statement.getName())); - } - else { - CatalogHandle catalogHandle = getRequiredCatalogHandle(plannerContext.getMetadata(), stateMachine.getSession(), statement, parts.get(0)); - 
accessControl.checkCanSetCatalogSessionProperty(SecurityContext.of(session), parts.get(0), parts.get(1)); - propertyMetadata = sessionPropertyManager.getConnectorSessionPropertyMetadata(catalogHandle, parts.get(1)) - .orElseThrow(() -> semanticException(INVALID_SESSION_PROPERTY, statement, "Session property '%s' does not exist", statement.getName())); - } - - Type type = propertyMetadata.getSqlType(); - Object objectValue; - - try { - objectValue = evaluatePropertyValue(statement.getValue(), type, session, plannerContext, accessControl, bindParameters(statement, parameters)); - } - catch (TrinoException e) { - throw new TrinoException( - INVALID_SESSION_PROPERTY, - format("Unable to set session property '%s' to '%s': %s", propertyName, statement.getValue(), e.getRawMessage())); + accessControl.checkCanSetSystemSessionProperty(session.getIdentity(), session.getQueryId(), parts.getFirst()); } - - String value = serializeSessionProperty(type, objectValue); - - // verify the SQL value can be decoded by the property - try { - propertyMetadata.decode(objectValue); + else if (parts.size() == 2) { + accessControl.checkCanSetCatalogSessionProperty(SecurityContext.of(session), parts.getFirst(), parts.getLast()); } - catch (RuntimeException e) { - throw semanticException(INVALID_SESSION_PROPERTY, statement, "%s", e.getMessage()); + else { + throw new TrinoException(INVALID_SESSION_PROPERTY, "Invalid session property '%s'".formatted(propertyName)); } - stateMachine.addSetSessionProperties(propertyName.toString(), value); + stateMachine.addSetSessionProperties( + statement.getName().toString(), + sessionEvaluator.evaluate(stateMachine.getSession(), statement.getName(), statement.getValue(), bindParameters(statement, parameters))); return immediateVoidFuture(); } diff --git a/core/trino-main/src/main/java/io/trino/execution/TaskManagerConfig.java b/core/trino-main/src/main/java/io/trino/execution/TaskManagerConfig.java index 08b3c8f56f1f..e197864ee9ac 100644 --- 
a/core/trino-main/src/main/java/io/trino/execution/TaskManagerConfig.java +++ b/core/trino-main/src/main/java/io/trino/execution/TaskManagerConfig.java @@ -525,9 +525,9 @@ public int getHttpResponseThreads() } @Config("task.http-response-threads") - public TaskManagerConfig setHttpResponseThreads(int httpResponseThreads) + public TaskManagerConfig setHttpResponseThreads(String httpResponseThreads) { - this.httpResponseThreads = httpResponseThreads; + this.httpResponseThreads = ThreadCountParser.DEFAULT.parse(httpResponseThreads); return this; } @@ -538,9 +538,9 @@ public int getHttpTimeoutThreads() } @Config("task.http-timeout-threads") - public TaskManagerConfig setHttpTimeoutThreads(int httpTimeoutThreads) + public TaskManagerConfig setHttpTimeoutThreads(String httpTimeoutThreads) { - this.httpTimeoutThreads = httpTimeoutThreads; + this.httpTimeoutThreads = ThreadCountParser.DEFAULT.parse(httpTimeoutThreads); return this; } @@ -552,9 +552,9 @@ public int getTaskNotificationThreads() @Config("task.task-notification-threads") @ConfigDescription("Number of threads used for internal task event notifications") - public TaskManagerConfig setTaskNotificationThreads(int taskNotificationThreads) + public TaskManagerConfig setTaskNotificationThreads(String taskNotificationThreads) { - this.taskNotificationThreads = taskNotificationThreads; + this.taskNotificationThreads = ThreadCountParser.DEFAULT.parse(taskNotificationThreads); return this; } @@ -566,9 +566,9 @@ public int getTaskYieldThreads() @Config("task.task-yield-threads") @ConfigDescription("Number of threads used for setting yield signals") - public TaskManagerConfig setTaskYieldThreads(int taskYieldThreads) + public TaskManagerConfig setTaskYieldThreads(String taskYieldThreads) { - this.taskYieldThreads = taskYieldThreads; + this.taskYieldThreads = ThreadCountParser.DEFAULT.parse(taskYieldThreads); return this; } @@ -580,9 +580,9 @@ public int getDriverTimeoutThreads() @Config("task.driver-timeout-threads") 
@ConfigDescription("Number of threads used for timing out blocked drivers if the timeout is set") - public TaskManagerConfig setDriverTimeoutThreads(int driverTimeoutThreads) + public TaskManagerConfig setDriverTimeoutThreads(String driverTimeoutThreads) { - this.driverTimeoutThreads = driverTimeoutThreads; + this.driverTimeoutThreads = ThreadCountParser.DEFAULT.parse(driverTimeoutThreads); return this; } diff --git a/core/trino-main/src/main/java/io/trino/execution/scheduler/faulttolerant/EventDrivenFaultTolerantQueryScheduler.java b/core/trino-main/src/main/java/io/trino/execution/scheduler/faulttolerant/EventDrivenFaultTolerantQueryScheduler.java index 22a34933a117..c8fcca351dab 100644 --- a/core/trino-main/src/main/java/io/trino/execution/scheduler/faulttolerant/EventDrivenFaultTolerantQueryScheduler.java +++ b/core/trino-main/src/main/java/io/trino/execution/scheduler/faulttolerant/EventDrivenFaultTolerantQueryScheduler.java @@ -137,6 +137,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -1659,6 +1660,33 @@ public Void onSinkInstanceHandleAcquired(SinkInstanceHandleAcquiredEvent sinkIns nodeLease.release(); } }); + + // we observed it may happen that final task info notification may be lost. + // in such case query progression will be blocked. 
+ // the code below is a stop-gap to mitigate this issue and unblock or fail query + // until we find and fix the bug + AtomicBoolean finalTaskInfoReceived = new AtomicBoolean(); + task.addStateChangeListener(taskStatus -> { + if (!taskStatus.getState().isDone()) { + return; + } + switch (taskStatus.getState()) { + case FINISHED -> scheduledExecutorService.schedule(() -> { + if (!finalTaskInfoReceived.get()) { + log.error("Did not receive final task info for task %s after it FINISHED; internal inconsistency; failing query", task.getTaskId()); + queryStateMachine.transitionToFailed(new TrinoException(GENERIC_INTERNAL_ERROR, "Did not receive final task info for task after it finished; failing query")); + } + }, 1, MINUTES); + case CANCELED, ABORTED, FAILED -> scheduledExecutorService.schedule(() -> { + if (!finalTaskInfoReceived.get()) { + log.error("Did not receive final task info for task %s after it %s; internal inconsistency; marking task failed in scheduler to unblock query progression", taskStatus.getState(), task.getTaskId()); + eventQueue.add(new RemoteTaskCompletedEvent(taskStatus)); + } + }, 1, MINUTES); + default -> throw new IllegalStateException("Unexpected task state: " + taskStatus.getState()); + } + }); + task.addFinalTaskInfoListener(taskExecutionStats::update); task.addFinalTaskInfoListener(taskInfo -> eventQueue.add(new RemoteTaskCompletedEvent(taskInfo.taskStatus()))); nodeLease.attachTaskId(task.getTaskId()); diff --git a/core/trino-main/src/main/java/io/trino/operator/DirectExchangeClientConfig.java b/core/trino-main/src/main/java/io/trino/operator/DirectExchangeClientConfig.java index 23b391bb5361..31b6838428a5 100644 --- a/core/trino-main/src/main/java/io/trino/operator/DirectExchangeClientConfig.java +++ b/core/trino-main/src/main/java/io/trino/operator/DirectExchangeClientConfig.java @@ -21,6 +21,7 @@ import io.airlift.units.Duration; import io.airlift.units.MinDataSize; import io.airlift.units.MinDuration; +import 
io.trino.execution.ThreadCountParser; import jakarta.validation.constraints.Min; import jakarta.validation.constraints.NotNull; @@ -99,9 +100,9 @@ public int getClientThreads() } @Config("exchange.client-threads") - public DirectExchangeClientConfig setClientThreads(int clientThreads) + public DirectExchangeClientConfig setClientThreads(String clientThreads) { - this.clientThreads = clientThreads; + this.clientThreads = ThreadCountParser.DEFAULT.parse(clientThreads); return this; } @@ -112,9 +113,9 @@ public int getPageBufferClientMaxCallbackThreads() } @Config("exchange.page-buffer-client.max-callback-threads") - public DirectExchangeClientConfig setPageBufferClientMaxCallbackThreads(int pageBufferClientMaxCallbackThreads) + public DirectExchangeClientConfig setPageBufferClientMaxCallbackThreads(String pageBufferClientMaxCallbackThreads) { - this.pageBufferClientMaxCallbackThreads = pageBufferClientMaxCallbackThreads; + this.pageBufferClientMaxCallbackThreads = ThreadCountParser.DEFAULT.parse(pageBufferClientMaxCallbackThreads); return this; } diff --git a/core/trino-main/src/main/java/io/trino/operator/OutputSpoolingOperatorFactory.java b/core/trino-main/src/main/java/io/trino/operator/OutputSpoolingOperatorFactory.java index 4dcc7a79cbec..7d336e2fc3ca 100644 --- a/core/trino-main/src/main/java/io/trino/operator/OutputSpoolingOperatorFactory.java +++ b/core/trino-main/src/main/java/io/trino/operator/OutputSpoolingOperatorFactory.java @@ -36,6 +36,8 @@ import java.io.IOException; import java.io.OutputStream; import java.io.UncheckedIOException; +import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -46,6 +48,7 @@ import static com.google.common.base.Preconditions.checkState; import static com.google.common.base.Verify.verify; import static io.airlift.units.Duration.succinctDuration; +import static io.trino.client.spooling.DataAttribute.EXPIRES_AT; import static 
io.trino.client.spooling.DataAttribute.ROWS_COUNT; import static io.trino.client.spooling.DataAttribute.SEGMENT_SIZE; import static io.trino.operator.OutputSpoolingOperatorFactory.OutputSpoolingOperator.State.FINISHED; @@ -137,6 +140,7 @@ static class OutputSpoolingOperator implements Operator { private final OutputSpoolingController controller; + private final ZoneId clientZoneId; enum State { @@ -161,6 +165,7 @@ enum State public OutputSpoolingOperator(OperatorContext operatorContext, QueryDataEncoder queryDataEncoder, SpoolingManager spoolingManager, Map layout) { this.operatorContext = requireNonNull(operatorContext, "operatorContext is null"); + this.clientZoneId = operatorContext.getSession().getTimeZoneKey().getZoneId(); this.controller = new OutputSpoolingController( isInliningEnabled(operatorContext.getSession()), getInliningMaxRows(operatorContext.getSession()), @@ -269,12 +274,14 @@ private Page spool(List pages, boolean finished) operatorContext.getDriverContext().getSession().getQueryId(), rows, size)); + String expiresAt = ZonedDateTime.ofInstant(segmentHandle.expirationTime(), clientZoneId).toLocalDateTime().toString(); OperationTimer overallTimer = new OperationTimer(false); try (OutputStream output = spoolingManager.createOutputStream(segmentHandle)) { DataAttributes attributes = queryDataEncoder.encodeTo(output, pages) .toBuilder() .set(ROWS_COUNT, rows) + .set(EXPIRES_AT, expiresAt) .build(); controller.recordEncoded(attributes.get(SEGMENT_SIZE, Integer.class)); diff --git a/core/trino-main/src/main/java/io/trino/server/CoordinatorModule.java b/core/trino-main/src/main/java/io/trino/server/CoordinatorModule.java index 2c1afc074bd4..3ba0bff47a80 100644 --- a/core/trino-main/src/main/java/io/trino/server/CoordinatorModule.java +++ b/core/trino-main/src/main/java/io/trino/server/CoordinatorModule.java @@ -58,6 +58,7 @@ import io.trino.execution.QueryPerformanceFetcher; import io.trino.execution.QueryPreparer; import 
io.trino.execution.RemoteTaskFactory; +import io.trino.execution.SessionPropertyEvaluator; import io.trino.execution.SqlQueryManager; import io.trino.execution.StageInfo; import io.trino.execution.TaskInfo; @@ -113,6 +114,7 @@ import io.trino.server.ui.WorkerResource; import io.trino.spi.VersionEmbedder; import io.trino.sql.PlannerContext; +import io.trino.sql.SessionPropertyResolver; import io.trino.sql.analyzer.AnalyzerFactory; import io.trino.sql.analyzer.QueryExplainerFactory; import io.trino.sql.planner.OptimizerStatsMBeanExporter; @@ -211,6 +213,8 @@ protected void setup(Binder binder) // dispatcher binder.bind(DispatchManager.class).in(Scopes.SINGLETON); + // WITH SESSION interpreter + binder.bind(SessionPropertyResolver.class).in(Scopes.SINGLETON); // export under the old name, for backwards compatibility newExporter(binder).export(DispatchManager.class).as(generator -> generator.generatedNameOf(QueryManager.class)); binder.bind(FailedDispatchQueryFactory.class).in(Scopes.SINGLETON); @@ -318,6 +322,9 @@ List getCompositeOutputDataSizeEstimatorDelegateFac // explain analyze binder.bind(ExplainAnalyzeContext.class).in(Scopes.SINGLETON); + // session evaluator + binder.bind(SessionPropertyEvaluator.class).in(Scopes.SINGLETON); + // execution scheduler jsonCodecBinder(binder).bindJsonCodec(TaskInfo.class); jsonCodecBinder(binder).bindJsonCodec(TaskStatus.class); diff --git a/core/trino-main/src/main/java/io/trino/server/QuerySessionSupplier.java b/core/trino-main/src/main/java/io/trino/server/QuerySessionSupplier.java index 1de68c23abdd..adb15c9ed715 100644 --- a/core/trino-main/src/main/java/io/trino/server/QuerySessionSupplier.java +++ b/core/trino-main/src/main/java/io/trino/server/QuerySessionSupplier.java @@ -34,7 +34,6 @@ import static io.trino.Session.SessionBuilder; import static io.trino.SystemSessionProperties.TIME_ZONE_ID; import static io.trino.server.HttpRequestSessionContextFactory.addEnabledRoles; -import static 
io.trino.spi.type.TimeZoneKey.getTimeZoneKey; import static java.util.Map.Entry; import static java.util.Objects.requireNonNull; @@ -128,18 +127,16 @@ public Session createSession(QueryId queryId, Span querySpan, SessionContext con sessionBuilder.setTimeZoneKey(forcedSessionTimeZone.get()); } else { - String sessionTimeZoneId = context.getSystemProperties().get(TIME_ZONE_ID); - if (sessionTimeZoneId != null) { - sessionBuilder.setTimeZoneKey(getTimeZoneKey(sessionTimeZoneId)); - } - else { - sessionBuilder.setTimeZoneKey(context.getTimeZoneId().map(TimeZoneKey::getTimeZoneKey)); - } + sessionBuilder.setTimeZoneKey(context.getTimeZoneId().map(TimeZoneKey::getTimeZoneKey)); } context.getLanguage().ifPresent(s -> sessionBuilder.setLocale(Locale.forLanguageTag(s))); for (Entry entry : context.getSystemProperties().entrySet()) { + if (entry.getKey().equals(TIME_ZONE_ID) && forcedSessionTimeZone.isPresent()) { + // Skip setting time zone id from session when forced session is set + continue; + } sessionBuilder.setSystemProperty(entry.getKey(), entry.getValue()); } for (Entry> catalogProperties : context.getCatalogSessionProperties().entrySet()) { diff --git a/core/trino-main/src/main/java/io/trino/server/ThrowableMapper.java b/core/trino-main/src/main/java/io/trino/server/ThrowableMapper.java index 1399e6d118f7..3da2e20352ee 100644 --- a/core/trino-main/src/main/java/io/trino/server/ThrowableMapper.java +++ b/core/trino-main/src/main/java/io/trino/server/ThrowableMapper.java @@ -15,6 +15,7 @@ import com.google.common.base.Throwables; import com.google.inject.Inject; +import io.airlift.jaxrs.JsonParsingException; import jakarta.ws.rs.BadRequestException; import jakarta.ws.rs.ForbiddenException; import jakarta.ws.rs.InternalServerErrorException; @@ -84,6 +85,9 @@ public Response toResponse(Throwable throwable) .entity("Error 408 Timeout: " + timeoutException.getMessage()) .build(); case WebApplicationException webApplicationException -> 
webApplicationException.getResponse(); + case JsonParsingException parsingException -> Response.status(Response.Status.BAD_REQUEST) + .entity(Throwables.getStackTraceAsString(parsingException)) + .build(); default -> { ResponseBuilder responseBuilder = plainTextError(Response.Status.INTERNAL_SERVER_ERROR); if (includeExceptionInResponse) { diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/Query.java b/core/trino-main/src/main/java/io/trino/server/protocol/Query.java index a161d59728af..6a8024c20679 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/Query.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/Query.java @@ -66,6 +66,7 @@ import io.trino.util.Ciphers; import jakarta.ws.rs.NotFoundException; +import java.io.EOFException; import java.net.URI; import java.util.List; import java.util.Map; @@ -78,6 +79,7 @@ import static com.google.common.base.MoreObjects.firstNonNull; import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Throwables.getCausalChain; import static com.google.common.base.Verify.verify; import static com.google.common.util.concurrent.Futures.immediateFuture; import static com.google.common.util.concurrent.Futures.immediateVoidFuture; @@ -636,12 +638,17 @@ private void closeExchangeIfNecessary(ResultQueryInfo queryInfo) private synchronized void handleSerializationException(Throwable exception) { + if (clientDisconnected(exception)) { + // Allow client to retry nextURI call + return; + } + // failQuery can throw exception if query has already finished. 
try { queryManager.failQuery(queryId, exception); } catch (RuntimeException e) { - log.debug(e, "Could not fail query"); + log.warn(e, "Could not fail query"); } if (typeSerializationException.isEmpty()) { @@ -649,6 +656,12 @@ private synchronized void handleSerializationException(Throwable exception) } } + private static boolean clientDisconnected(Throwable exception) + { + return getCausalChain(exception).stream() + .anyMatch(EOFException.class::isInstance); + } + private synchronized void setQueryOutputInfo(QueryExecution.QueryOutputInfo outputInfo) { // if first callback, set column names diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java index c29432421c7f..2c0653da6117 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java @@ -42,6 +42,7 @@ public class JsonQueryDataEncoder implements QueryDataEncoder { + private static final JsonFactory JSON_FACTORY = jsonFactory(); private static final String ENCODING = "json"; private final Session session; private final TypeEncoder[] typeEncoders; @@ -60,9 +61,8 @@ public JsonQueryDataEncoder(Session session, List columns) public DataAttributes encodeTo(OutputStream output, List pages) throws IOException { - JsonFactory jsonFactory = jsonFactory(); ConnectorSession connectorSession = session.toConnectorSession(); - try (CountingOutputStream wrapper = new CountingOutputStream(output); JsonGenerator generator = jsonFactory.createGenerator(wrapper)) { + try (CountingOutputStream wrapper = new CountingOutputStream(output); JsonGenerator generator = JSON_FACTORY.createGenerator(wrapper)) { writePagesToJsonGenerator(connectorSession, e -> { throw e; }, generator, typeEncoders, sourcePageChannels, pages); 
return DataAttributes.builder() .set(SEGMENT_SIZE, toIntExact(wrapper.getCount())) diff --git a/core/trino-main/src/main/java/io/trino/sql/SessionPropertyResolver.java b/core/trino-main/src/main/java/io/trino/sql/SessionPropertyResolver.java new file mode 100644 index 000000000000..8f7bade78ac2 --- /dev/null +++ b/core/trino-main/src/main/java/io/trino/sql/SessionPropertyResolver.java @@ -0,0 +1,158 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.sql; + +import com.google.common.collect.HashBasedTable; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Table; +import com.google.inject.Inject; +import io.trino.Session; +import io.trino.execution.QueryPreparer.PreparedQuery; +import io.trino.execution.SessionPropertyEvaluator; +import io.trino.security.AccessControl; +import io.trino.security.SecurityContext; +import io.trino.sql.tree.Expression; +import io.trino.sql.tree.NodeRef; +import io.trino.sql.tree.Parameter; +import io.trino.sql.tree.QualifiedName; +import io.trino.sql.tree.Query; +import io.trino.sql.tree.SessionProperty; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +import static com.google.common.base.Preconditions.checkState; +import static io.trino.execution.ParameterExtractor.bindParameters; +import static io.trino.spi.StandardErrorCode.INVALID_SESSION_PROPERTY; +import 
static io.trino.sql.analyzer.SemanticExceptions.semanticException; +import static java.util.Objects.requireNonNull; + +public class SessionPropertyResolver +{ + private final SessionPropertyEvaluator sessionPropertyEvaluator; + private final AccessControl accessControl; + + @Inject + public SessionPropertyResolver(SessionPropertyEvaluator sessionPropertyEvaluator, AccessControl accessControl) + { + this.sessionPropertyEvaluator = requireNonNull(sessionPropertyEvaluator, "sessionEvaluator is null"); + this.accessControl = requireNonNull(accessControl, "accessControl is null"); + } + + public SessionPropertiesApplier getSessionPropertiesApplier(PreparedQuery preparedQuery) + { + if (!(preparedQuery.getStatement() instanceof Query queryStatement)) { + return session -> session; + } + return session -> prepareSession(session, queryStatement.getSessionProperties(), bindParameters(preparedQuery.getStatement(), preparedQuery.getParameters())); + } + + private Session prepareSession(Session session, List sessionProperties, Map, Expression> parameters) + { + ResolvedSessionProperties resolvedSessionProperties = resolve(session, parameters, sessionProperties); + return overrideProperties(session, resolvedSessionProperties); + } + + private ResolvedSessionProperties resolve(Session session, Map, Expression> parameters, List sessionProperties) + { + ImmutableMap.Builder systemProperties = ImmutableMap.builder(); + Table catalogProperties = HashBasedTable.create(); + Set seenPropertyNames = new HashSet<>(); + + for (SessionProperty sessionProperty : sessionProperties) { + List nameParts = sessionProperty.getName().getParts(); + + if (!seenPropertyNames.add(sessionProperty.getName())) { + throw semanticException(INVALID_SESSION_PROPERTY, sessionProperty, "Session property %s already set", sessionProperty.getName()); + } + + if (nameParts.size() == 1) { + systemProperties.put(nameParts.getFirst(), sessionPropertyEvaluator.evaluate(session, sessionProperty.getName(), 
sessionProperty.getValue(), parameters)); + } + else if (nameParts.size() == 2) { + String catalogName = nameParts.getFirst(); + String propertyName = nameParts.getLast(); + catalogProperties.put(catalogName, propertyName, sessionPropertyEvaluator.evaluate(session, sessionProperty.getName(), sessionProperty.getValue(), parameters)); + } + else { + throw semanticException(INVALID_SESSION_PROPERTY, sessionProperty, "Invalid session property '%s'", sessionProperty.getName()); + } + } + + return new ResolvedSessionProperties(systemProperties.buildOrThrow(), catalogProperties.rowMap()); + } + + private Session overrideProperties(Session session, ResolvedSessionProperties resolvedSessionProperties) + { + requireNonNull(resolvedSessionProperties, "resolvedSessionProperties is null"); + + validateSystemProperties(session, resolvedSessionProperties.systemProperties()); + + // Catalog session properties were already evaluated so we need to evaluate overrides + if (session.getTransactionId().isPresent()) { + validateCatalogProperties(session, resolvedSessionProperties.catalogProperties()); + } + + // NOTE: properties are validated before calling overrideProperties + Map systemProperties = new HashMap<>(); + systemProperties.putAll(session.getSystemProperties()); + systemProperties.putAll(resolvedSessionProperties.systemProperties()); + + Map> catalogProperties = new HashMap<>(session.getCatalogProperties()); + for (Map.Entry> catalogEntry : resolvedSessionProperties.catalogProperties().entrySet()) { + catalogProperties.computeIfAbsent(catalogEntry.getKey(), _ -> new HashMap<>()) + .putAll(catalogEntry.getValue()); + } + + return session.withProperties(systemProperties, catalogProperties); + } + + private void validateSystemProperties(Session session, Map systemProperties) + { + for (Map.Entry property : systemProperties.entrySet()) { + // verify permissions + accessControl.checkCanSetSystemSessionProperty(session.getIdentity(), session.getQueryId(), property.getKey()); + } + 
} + + private void validateCatalogProperties(Session session, Map> catalogsProperties) + { + checkState(session.getTransactionId().isPresent(), "Not in transaction"); + for (Map.Entry> catalogProperties : catalogsProperties.entrySet()) { + for (Map.Entry catalogProperty : catalogProperties.getValue().entrySet()) { + // verify permissions + accessControl.checkCanSetCatalogSessionProperty(new SecurityContext(session.getRequiredTransactionId(), session.getIdentity(), session.getQueryId(), session.getStart()), catalogProperties.getKey(), catalogProperty.getKey()); + } + } + } + + public record ResolvedSessionProperties(Map systemProperties, Map> catalogProperties) + { + public ResolvedSessionProperties + { + systemProperties = ImmutableMap.copyOf(requireNonNull(systemProperties, "systemProperties is null")); + catalogProperties = ImmutableMap.copyOf(requireNonNull(catalogProperties, "catalogProperties is null")); + } + } + + @FunctionalInterface + public interface SessionPropertiesApplier + extends Function + { + } +} diff --git a/core/trino-main/src/main/java/io/trino/testing/MaterializedResult.java b/core/trino-main/src/main/java/io/trino/testing/MaterializedResult.java index 490a2df6413e..6c66cc7d4806 100644 --- a/core/trino-main/src/main/java/io/trino/testing/MaterializedResult.java +++ b/core/trino-main/src/main/java/io/trino/testing/MaterializedResult.java @@ -16,6 +16,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import io.trino.FullConnectorSession; import io.trino.Session; import io.trino.client.StatementStats; import io.trino.client.Warning; @@ -64,6 +65,7 @@ public class MaterializedResult { public static final int DEFAULT_PRECISION = 5; + private final Optional session; private final List rows; private final List types; private final List columnNames; @@ -75,17 +77,18 @@ public class MaterializedResult private final List warnings; private final Optional 
statementStats; - public MaterializedResult(List rows, List types) + public MaterializedResult(Optional session, List rows, List types) { - this(rows, types, Optional.empty(), Optional.empty()); + this(session, rows, types, Optional.empty(), Optional.empty()); } - public MaterializedResult(List rows, List types, Optional> columnNames, Optional queryDataEncoding) + public MaterializedResult(Optional session, List rows, List types, Optional> columnNames, Optional queryDataEncoding) { - this(rows, types, columnNames.orElse(ImmutableList.of()), queryDataEncoding, ImmutableMap.of(), ImmutableSet.of(), Optional.empty(), OptionalLong.empty(), ImmutableList.of(), Optional.empty()); + this(session, rows, types, columnNames.orElse(ImmutableList.of()), queryDataEncoding, ImmutableMap.of(), ImmutableSet.of(), Optional.empty(), OptionalLong.empty(), ImmutableList.of(), Optional.empty()); } public MaterializedResult( + Optional session, List rows, List types, List columnNames, @@ -97,6 +100,7 @@ public MaterializedResult( List warnings, Optional statementStats) { + this.session = requireNonNull(session, "session is null"); this.rows = ImmutableList.copyOf(requireNonNull(rows, "rows is null")); this.types = ImmutableList.copyOf(requireNonNull(types, "types is null")); this.columnNames = ImmutableList.copyOf(requireNonNull(columnNames, "columnNames is null")); @@ -109,6 +113,11 @@ public MaterializedResult( this.statementStats = requireNonNull(statementStats, "statementStats is null"); } + public Session getSession() + { + return session.orElseThrow(() -> new IllegalStateException("Effective session is not set")); + } + public int getRowCount() { return rows.size(); @@ -244,6 +253,7 @@ private MaterializedResult projected(Predicate columnFilter) } return new MaterializedResult( + session, getMaterializedRows().stream() .map(row -> new MaterializedRow( row.getPrecision(), @@ -283,6 +293,7 @@ public Object getOnlyValue() public MaterializedResult toTestTypes() { return new 
MaterializedResult( + session, rows.stream() .map(MaterializedResult::convertToTestTypes) .collect(toImmutableList()), @@ -359,7 +370,6 @@ public static class Builder private final ConnectorSession session; private final List types; private final ImmutableList.Builder rows = ImmutableList.builder(); - private Optional queryDataEncoding = Optional.empty(); private Optional> columnNames = Optional.empty(); Builder(ConnectorSession session, List types) @@ -422,15 +432,14 @@ public synchronized Builder columnNames(List columnNames) return this; } - public synchronized Builder queryDataEncoding(String encoding) - { - this.queryDataEncoding = Optional.of(requireNonNull(encoding, "encoding is null")); - return this; - } - public synchronized MaterializedResult build() { - return new MaterializedResult(rows.build(), types, columnNames, queryDataEncoding); + if ((session instanceof FullConnectorSession fullConnectorSession)) { + return new MaterializedResult(Optional.of(fullConnectorSession.getSession()), rows.build(), types, columnNames, Optional.empty()); + } + + // For TestingConnectorSession we are unable to retrieve full Session which makes the effective session empty in that case + return new MaterializedResult(Optional.empty(), rows.build(), types, columnNames, Optional.empty()); } } } diff --git a/core/trino-main/src/main/java/io/trino/testing/TestingDirectTrinoClient.java b/core/trino-main/src/main/java/io/trino/testing/TestingDirectTrinoClient.java index 34ea8aa09588..7f125d751c06 100644 --- a/core/trino-main/src/main/java/io/trino/testing/TestingDirectTrinoClient.java +++ b/core/trino-main/src/main/java/io/trino/testing/TestingDirectTrinoClient.java @@ -83,6 +83,7 @@ private static MaterializedResult toMaterializedRows(DispatchQuery dispatchQuery if (pages.isEmpty() && columnTypes == null) { // the query did not produce any output return new MaterializedResult( + Optional.of(dispatchQuery.getSession()), ImmutableList.of(), ImmutableList.of(), ImmutableList.of(), 
@@ -106,6 +107,7 @@ private static MaterializedResult toMaterializedRows(DispatchQuery dispatchQuery } return new MaterializedResult( + Optional.of(dispatchQuery.getSession()), materializedRows, columnTypes, columnNames, diff --git a/core/trino-main/src/test/java/io/trino/dispatcher/TestLocalDispatchQuery.java b/core/trino-main/src/test/java/io/trino/dispatcher/TestLocalDispatchQuery.java index 1a3f052a4b55..9db618a38d3f 100644 --- a/core/trino-main/src/test/java/io/trino/dispatcher/TestLocalDispatchQuery.java +++ b/core/trino-main/src/test/java/io/trino/dispatcher/TestLocalDispatchQuery.java @@ -123,6 +123,7 @@ public void testSubmittedForDispatchedQuery() createPlanOptimizersStatsCollector(), Optional.of(QueryType.DATA_DEFINITION), true, + Optional.empty(), new NodeVersion("test")); QueryMonitor queryMonitor = new QueryMonitor( JsonCodec.jsonCodec(StageInfo.class), diff --git a/core/trino-main/src/test/java/io/trino/execution/BaseDataDefinitionTaskTest.java b/core/trino-main/src/test/java/io/trino/execution/BaseDataDefinitionTaskTest.java index 912325d6775c..8e0f75f67fc5 100644 --- a/core/trino-main/src/test/java/io/trino/execution/BaseDataDefinitionTaskTest.java +++ b/core/trino-main/src/test/java/io/trino/execution/BaseDataDefinitionTaskTest.java @@ -244,6 +244,7 @@ private static QueryStateMachine stateMachine(TransactionManager transactionMana createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestCallTask.java b/core/trino-main/src/test/java/io/trino/execution/TestCallTask.java index 683cdf651a5d..d182af5509f7 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestCallTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestCallTask.java @@ -173,6 +173,7 @@ private QueryStateMachine stateMachine(TransactionManager transactionManager, Me createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), 
new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java b/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java index 1e49a2a7150c..5a34d08ed433 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java @@ -147,6 +147,7 @@ private QueryStateMachine createQueryStateMachine(String query, Session session, createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestCreateCatalogTask.java b/core/trino-main/src/test/java/io/trino/execution/TestCreateCatalogTask.java index fc502fdb3eba..74328a1eaccf 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestCreateCatalogTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestCreateCatalogTask.java @@ -98,6 +98,7 @@ public void setUp() createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); this.queryRunner = queryRunner; @@ -190,6 +191,7 @@ public void testAddOrReplaceCatalogFail() createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); CreateCatalog statement = new CreateCatalog( diff --git a/core/trino-main/src/test/java/io/trino/execution/TestDeallocateTask.java b/core/trino-main/src/test/java/io/trino/execution/TestDeallocateTask.java index b63ab4a9caba..9547212ff04d 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestDeallocateTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestDeallocateTask.java @@ -116,6 +116,7 @@ private Set executeDeallocate(String statementName, String sqlString, Se createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); Deallocate deallocate = new Deallocate(new NodeLocation(1, 1), new 
Identifier(statementName)); new DeallocateTask().execute(deallocate, stateMachine, emptyList(), WarningCollector.NOOP); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestDistributionSnapshot.java b/core/trino-main/src/test/java/io/trino/execution/TestDistributionSnapshot.java new file mode 100644 index 000000000000..e96d85a03076 --- /dev/null +++ b/core/trino-main/src/test/java/io/trino/execution/TestDistributionSnapshot.java @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.execution; + +import io.airlift.stats.TDigest; +import io.trino.plugin.base.metrics.TDigestHistogram; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +class TestDistributionSnapshot +{ + @Test + void testConvertFromTDigestHistogram() + { + TDigest digest = new TDigest(); + digest.add(1.0); + digest.add(10.0); + digest.add(10.0); + digest.add(1.0); + digest.add(4.0); + digest.add(5.0); + digest.add(1.0); + digest.add(3.0); + digest.add(7.0); + + TDigestHistogram histogram = new TDigestHistogram(digest); + DistributionSnapshot snapshot = new DistributionSnapshot(histogram); + + assertThat(snapshot.total()).isEqualTo(9); + assertThat(snapshot.min()).isEqualTo(1.0); + assertThat(snapshot.max()).isEqualTo(10.0); + assertThat(snapshot.p01()).isEqualTo(1.0); + assertThat(snapshot.p05()).isEqualTo(1.0); + assertThat(snapshot.p10()).isEqualTo(1.0); + assertThat(snapshot.p25()).isEqualTo(1.0); + assertThat(snapshot.p50()).isEqualTo(4.0); + assertThat(snapshot.p75()).isEqualTo(7.0); + assertThat(snapshot.p90()).isEqualTo(10.0); + assertThat(snapshot.p95()).isEqualTo(10.0); + assertThat(snapshot.p99()).isEqualTo(10.0); + } +} diff --git a/core/trino-main/src/test/java/io/trino/execution/TestDropCatalogTask.java b/core/trino-main/src/test/java/io/trino/execution/TestDropCatalogTask.java index fc84d5a368b6..73dc2065ea0f 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestDropCatalogTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestDropCatalogTask.java @@ -129,6 +129,7 @@ private QueryStateMachine createNewQuery() createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestPrepareTask.java b/core/trino-main/src/test/java/io/trino/execution/TestPrepareTask.java index 2c1187c65e5a..44ef35de41b8 100644 --- 
a/core/trino-main/src/test/java/io/trino/execution/TestPrepareTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestPrepareTask.java @@ -139,6 +139,7 @@ private Map executePrepare(String statementName, Statement state createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); Prepare prepare = new Prepare(identifier(statementName), statement); new PrepareTask(new SqlParser()).execute(prepare, stateMachine, emptyList(), WarningCollector.NOOP); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestQueryManagerConfig.java b/core/trino-main/src/test/java/io/trino/execution/TestQueryManagerConfig.java index 4582a51ce380..5d6acd9595b7 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestQueryManagerConfig.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestQueryManagerConfig.java @@ -57,19 +57,19 @@ public void testDefaults() .setMaxHashPartitionCount(100) .setMinHashPartitionCount(4) .setMinHashPartitionCountForWrite(50) - .setQueryManagerExecutorPoolSize(5) - .setQueryExecutorPoolSize(1000) - .setMaxStateMachineCallbackThreads(5) - .setMaxSplitManagerCallbackThreads(100) + .setQueryManagerExecutorPoolSize("5") + .setQueryExecutorPoolSize("1000") + .setMaxStateMachineCallbackThreads("5") + .setMaxSplitManagerCallbackThreads("100") .setRemoteTaskMaxErrorDuration(new Duration(1, MINUTES)) - .setRemoteTaskMaxCallbackThreads(1000) + .setRemoteTaskMaxCallbackThreads("1000") .setQueryExecutionPolicy("phased") .setQueryMaxRunTime(new Duration(100, DAYS)) .setQueryMaxExecutionTime(new Duration(100, DAYS)) .setQueryMaxPlanningTime(new Duration(10, MINUTES)) .setQueryMaxCpuTime(new Duration(1_000_000_000, DAYS)) .setQueryReportedRuleStatsLimit(10) - .setDispatcherQueryPoolSize(max(50, Runtime.getRuntime().availableProcessors() * 10)) + .setDispatcherQueryPoolSize(Integer.toString(max(50, Runtime.getRuntime().availableProcessors() * 10))) .setQueryMaxScanPhysicalBytes(null) 
.setRequiredWorkers(1) .setRequiredWorkersMaxWait(new Duration(5, MINUTES)) @@ -220,19 +220,19 @@ public void testExplicitPropertyMappings() .setMaxHashPartitionCount(16) .setMinHashPartitionCount(2) .setMinHashPartitionCountForWrite(88) - .setQueryManagerExecutorPoolSize(11) - .setQueryExecutorPoolSize(111) - .setMaxStateMachineCallbackThreads(112) - .setMaxSplitManagerCallbackThreads(113) + .setQueryManagerExecutorPoolSize("11") + .setQueryExecutorPoolSize("111") + .setMaxStateMachineCallbackThreads("112") + .setMaxSplitManagerCallbackThreads("113") .setRemoteTaskMaxErrorDuration(new Duration(37, SECONDS)) - .setRemoteTaskMaxCallbackThreads(10) + .setRemoteTaskMaxCallbackThreads("10") .setQueryExecutionPolicy("foo-bar-execution-policy") .setQueryMaxRunTime(new Duration(2, HOURS)) .setQueryMaxExecutionTime(new Duration(3, HOURS)) .setQueryMaxPlanningTime(new Duration(1, HOURS)) .setQueryMaxCpuTime(new Duration(2, DAYS)) .setQueryReportedRuleStatsLimit(50) - .setDispatcherQueryPoolSize(151) + .setDispatcherQueryPoolSize("151") .setQueryMaxScanPhysicalBytes(DataSize.of(1, KILOBYTE)) .setRequiredWorkers(333) .setRequiredWorkersMaxWait(new Duration(33, MINUTES)) diff --git a/core/trino-main/src/test/java/io/trino/execution/TestQueryStateMachine.java b/core/trino-main/src/test/java/io/trino/execution/TestQueryStateMachine.java index eed4fdea1461..a52516ff3315 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestQueryStateMachine.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestQueryStateMachine.java @@ -867,6 +867,7 @@ public QueryStateMachine build() createPlanOptimizersStatsCollector(), QUERY_TYPE, false, + Optional.empty(), new NodeVersion("test")); stateMachine.setInputs(INPUTS); stateMachine.setOutput(OUTPUT); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestResetSessionTask.java b/core/trino-main/src/test/java/io/trino/execution/TestResetSessionTask.java index e145ebd36638..6b1d24ab086a 100644 --- 
a/core/trino-main/src/test/java/io/trino/execution/TestResetSessionTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestResetSessionTask.java @@ -120,6 +120,7 @@ public void test() createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); getFutureValue(new ResetSessionTask(metadata, sessionPropertyManager).execute( diff --git a/core/trino-main/src/test/java/io/trino/execution/TestRoleTasks.java b/core/trino-main/src/test/java/io/trino/execution/TestRoleTasks.java index 310df2570f3e..3114f84c2757 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestRoleTasks.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestRoleTasks.java @@ -176,6 +176,7 @@ protected QueryStateMachine execute(String statement, Data createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); task.execute((T) parser.createStatement(statement), stateMachine, ImmutableList.of(), WarningCollector.NOOP); return stateMachine; diff --git a/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java b/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java index 8a6f94660185..429c9eb3e868 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java @@ -137,6 +137,7 @@ private QueryStateMachine createQueryStateMachine(String query, Session session, createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestSetPathTask.java b/core/trino-main/src/test/java/io/trino/execution/TestSetPathTask.java index 6bede96b4b63..0a9d6d01fdea 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestSetPathTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestSetPathTask.java @@ -124,6 +124,7 @@ private QueryStateMachine 
createQueryStateMachine(String query) createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestSetSessionAuthorizationTask.java b/core/trino-main/src/test/java/io/trino/execution/TestSetSessionAuthorizationTask.java index fcc737fa765b..f0319df048d5 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestSetSessionAuthorizationTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestSetSessionAuthorizationTask.java @@ -124,6 +124,7 @@ private QueryStateMachine createStateMachine(Optional transaction createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); return stateMachine; } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestSetSessionTask.java b/core/trino-main/src/test/java/io/trino/execution/TestSetSessionTask.java index 26a66a699510..644c1fb80cae 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestSetSessionTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestSetSessionTask.java @@ -25,6 +25,7 @@ import io.trino.spi.TrinoException; import io.trino.spi.resourcegroups.ResourceGroupId; import io.trino.sql.PlannerContext; +import io.trino.sql.SqlEnvironmentConfig; import io.trino.sql.tree.Expression; import io.trino.sql.tree.FunctionCall; import io.trino.sql.tree.LongLiteral; @@ -208,8 +209,9 @@ private void testSetSessionWithParameters(String property, Expression expression createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); - getFutureValue(new SetSessionTask(plannerContext, accessControl, sessionPropertyManager).execute(new SetSession(new NodeLocation(1, 1), qualifiedPropName, expression), stateMachine, parameters, WarningCollector.NOOP)); + getFutureValue(new SetSessionTask(new SessionPropertyEvaluator(plannerContext, accessControl, sessionPropertyManager, new 
SqlEnvironmentConfig()), accessControl).execute(new SetSession(new NodeLocation(1, 1), qualifiedPropName, expression), stateMachine, parameters, WarningCollector.NOOP)); Map sessionProperties = stateMachine.getSetSessionProperties(); assertThat(sessionProperties).isEqualTo(ImmutableMap.of(qualifiedPropName.toString(), expectedValue)); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestSetTimeZoneTask.java b/core/trino-main/src/test/java/io/trino/execution/TestSetTimeZoneTask.java index 726cfa443655..c5401a5d9985 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestSetTimeZoneTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestSetTimeZoneTask.java @@ -266,6 +266,7 @@ private QueryStateMachine createQueryStateMachine(String query) createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java b/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java index e6cd335f0f0e..415d259e0b36 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java @@ -268,6 +268,7 @@ private QueryStateMachine createQueryStateMachine(String query, Session session, createPlanOptimizersStatsCollector(), Optional.empty(), true, + Optional.empty(), new NodeVersion("test")); } diff --git a/core/trino-main/src/test/java/io/trino/execution/TestTaskManagerConfig.java b/core/trino-main/src/test/java/io/trino/execution/TestTaskManagerConfig.java index 184814cf4a5c..22913a617256 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestTaskManagerConfig.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestTaskManagerConfig.java @@ -66,11 +66,11 @@ public void testDefaults() .setMinWriterCount(1) .setMaxWriterCount(DEFAULT_MAX_WRITER_COUNT) 
.setTaskConcurrency(DEFAULT_PROCESSOR_COUNT) - .setHttpResponseThreads(100) - .setHttpTimeoutThreads(3) - .setTaskNotificationThreads(5) - .setTaskYieldThreads(3) - .setDriverTimeoutThreads(5) + .setHttpResponseThreads("100") + .setHttpTimeoutThreads("3") + .setTaskNotificationThreads("5") + .setTaskYieldThreads("3") + .setDriverTimeoutThreads("5") .setLevelTimeMultiplier(new BigDecimal("2")) .setStatisticsCpuTimerEnabled(true) .setInterruptStuckSplitTasksEnabled(true) @@ -153,11 +153,11 @@ public void testExplicitPropertyMappings() .setMinWriterCount(4) .setMaxWriterCount(maxWriterCount) .setTaskConcurrency(processorCount) - .setHttpResponseThreads(4) - .setHttpTimeoutThreads(10) - .setTaskNotificationThreads(13) - .setTaskYieldThreads(8) - .setDriverTimeoutThreads(10) + .setHttpResponseThreads("4") + .setHttpTimeoutThreads("10") + .setTaskNotificationThreads("13") + .setTaskYieldThreads("8") + .setDriverTimeoutThreads("10") .setLevelTimeMultiplier(new BigDecimal("2.1")) .setStatisticsCpuTimerEnabled(false) .setInterruptStuckSplitTasksEnabled(false) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClientConfig.java b/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClientConfig.java index 49684828d156..0c007be5aa1f 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClientConfig.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClientConfig.java @@ -37,8 +37,8 @@ public void testDefaults() .setConcurrentRequestMultiplier(3) .setMaxErrorDuration(new Duration(1, TimeUnit.MINUTES)) .setMaxResponseSize(new HttpClientConfig().getMaxContentLength()) - .setPageBufferClientMaxCallbackThreads(25) - .setClientThreads(25) + .setPageBufferClientMaxCallbackThreads("25") + .setClientThreads("25") .setAcknowledgePages(true) .setDeduplicationBufferSize(DataSize.of(32, Unit.MEGABYTE))); } @@ -62,8 +62,8 @@ public void testExplicitPropertyMappings() 
.setConcurrentRequestMultiplier(13) .setMaxErrorDuration(new Duration(33, TimeUnit.SECONDS)) .setMaxResponseSize(DataSize.of(1, Unit.MEGABYTE)) - .setClientThreads(2) - .setPageBufferClientMaxCallbackThreads(16) + .setClientThreads("2") + .setPageBufferClientMaxCallbackThreads("16") .setAcknowledgePages(false) .setDeduplicationBufferSize(DataSize.of(2, Unit.MEGABYTE)); diff --git a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java index 980c9844652a..6f79b1304206 100644 --- a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java +++ b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java @@ -16,15 +16,16 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import io.airlift.json.JsonCodec; import io.airlift.json.JsonCodecFactory; import io.airlift.json.ObjectMapperProvider; import io.trino.client.ClientTypeSignature; import io.trino.client.Column; -import io.trino.client.JsonCodec; import io.trino.client.QueryData; import io.trino.client.QueryResults; import io.trino.client.ResultRowsDecoder; import io.trino.client.StatementStats; +import io.trino.client.TrinoJsonCodec; import io.trino.client.TypedQueryData; import io.trino.client.spooling.DataAttributes; import io.trino.client.spooling.EncodedQueryData; @@ -38,7 +39,7 @@ import java.util.Set; import static io.trino.client.ClientStandardTypes.BIGINT; -import static io.trino.client.JsonCodec.jsonCodec; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static io.trino.client.spooling.DataAttribute.ROWS_COUNT; import static io.trino.client.spooling.DataAttribute.ROW_OFFSET; import static io.trino.client.spooling.DataAttribute.SCHEMA; @@ -54,8 +55,8 @@ public class TestQueryDataSerialization { private static final 
List COLUMNS_LIST = ImmutableList.of(new Column("_col0", "bigint", new ClientTypeSignature("bigint"))); - private static final JsonCodec CLIENT_CODEC = jsonCodec(QueryResults.class); - private static final io.airlift.json.JsonCodec SERVER_CODEC = new JsonCodecFactory(new ObjectMapperProvider() + private static final TrinoJsonCodec CLIENT_CODEC = jsonCodec(QueryResults.class); + private static final JsonCodec SERVER_CODEC = new JsonCodecFactory(new ObjectMapperProvider() .withModules(Set.of(new QueryDataJacksonModule()))) .jsonCodec(QueryResults.class); diff --git a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java index badc453a165e..85f49a066fe2 100644 --- a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java +++ b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java @@ -15,15 +15,16 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.collect.ImmutableList; +import io.airlift.json.JsonCodec; import io.airlift.json.JsonCodecFactory; import io.airlift.json.ObjectMapperProvider; import io.trino.client.ClientTypeSignature; import io.trino.client.Column; -import io.trino.client.JsonCodec; import io.trino.client.QueryData; import io.trino.client.QueryResults; import io.trino.client.ResultRowsDecoder; import io.trino.client.StatementStats; +import io.trino.client.TrinoJsonCodec; import io.trino.client.TypedQueryData; import io.trino.server.protocol.spooling.QueryDataJacksonModule; import org.junit.jupiter.api.Test; @@ -35,7 +36,7 @@ import java.util.Set; import static io.trino.client.ClientStandardTypes.BIGINT; -import static io.trino.client.JsonCodec.jsonCodec; +import static io.trino.client.TrinoJsonCodec.jsonCodec; import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; import static 
org.assertj.core.api.Assertions.assertThatThrownBy; @@ -45,11 +46,11 @@ public class TestQueryResultsSerialization private static final List COLUMNS = ImmutableList.of(new Column("_col0", BIGINT, new ClientTypeSignature("bigint"))); // As close as possible to the server mapper (client mapper differs) - private static final io.airlift.json.JsonCodec SERVER_CODEC = new JsonCodecFactory(new ObjectMapperProvider() + private static final JsonCodec SERVER_CODEC = new JsonCodecFactory(new ObjectMapperProvider() .withModules(Set.of(new QueryDataJacksonModule()))) .jsonCodec(QueryResults.class); - private static final JsonCodec CLIENT_CODEC = jsonCodec(QueryResults.class); + private static final TrinoJsonCodec CLIENT_CODEC = jsonCodec(QueryResults.class); @Test public void testNullDataSerialization() diff --git a/core/trino-main/src/test/java/io/trino/server/ui/PreviewUiQueryRunner.java b/core/trino-main/src/test/java/io/trino/server/ui/PreviewUiQueryRunner.java deleted file mode 100644 index 8ec87f18d096..000000000000 --- a/core/trino-main/src/test/java/io/trino/server/ui/PreviewUiQueryRunner.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.server.ui; - -import io.airlift.log.Logger; -import io.trino.server.testing.TestingTrinoServer; -import io.trino.testing.StandaloneQueryRunner; - -import static io.trino.testing.TestingHandles.TEST_CATALOG_NAME; -import static io.trino.testing.TestingSession.testSessionBuilder; - -public class PreviewUiQueryRunner -{ - private PreviewUiQueryRunner() {} - - public static void main(String[] args) - { - StandaloneQueryRunner queryRunner = new StandaloneQueryRunner(testSessionBuilder() - .setCatalog(TEST_CATALOG_NAME) - .setSchema("tiny") - .build(), PreviewUiQueryRunner::configureTrinoServer); - - Logger log = Logger.get(PreviewUiQueryRunner.class); - log.info("======== SERVER STARTED ========"); - log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl()); - } - - private static void configureTrinoServer(TestingTrinoServer.Builder builder) - { - builder.addProperty("web-ui.preview.enabled", "true"); - builder.addProperty("http-server.http.port", "8080"); - builder.addProperty("web-ui.authentication.type", "fixed"); - builder.addProperty("web-ui.user", "webapp-preview-user"); - } -} diff --git a/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java b/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java index 7523170eeeac..09117353f94b 100644 --- a/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java +++ b/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java @@ -79,7 +79,7 @@ public void setUp() tempDirectory = createTempDirectory(getClass().getSimpleName()); FeaturesConfig featuresConfig = new FeaturesConfig(); featuresConfig.setSpillerSpillPaths(ImmutableList.of(tempDirectory.toString())); - featuresConfig.setSpillerThreads(8); + featuresConfig.setSpillerThreads("8"); featuresConfig.setSpillMaxUsedSpaceThreshold(1.0); SingleStreamSpillerFactory singleStreamSpillerFactory = new FileSingleStreamSpillerFactory( new 
TestingBlockEncodingSerde(), diff --git a/core/trino-main/src/test/java/io/trino/sql/analyzer/TestFeaturesConfig.java b/core/trino-main/src/test/java/io/trino/sql/analyzer/TestFeaturesConfig.java index ca8b7134d1cb..b385ed3fb411 100644 --- a/core/trino-main/src/test/java/io/trino/sql/analyzer/TestFeaturesConfig.java +++ b/core/trino-main/src/test/java/io/trino/sql/analyzer/TestFeaturesConfig.java @@ -49,7 +49,7 @@ public void testDefaults() .setSpillEnabled(false) .setAggregationOperatorUnspillMemoryLimit(DataSize.valueOf("4MB")) .setSpillerSpillPaths(ImmutableList.of()) - .setSpillerThreads(4) + .setSpillerThreads("4") .setSpillMaxUsedSpaceThreshold(0.9) .setMemoryRevokingThreshold(0.9) .setMemoryRevokingTarget(0.5) @@ -114,7 +114,7 @@ public void testExplicitPropertyMappings() .setSpillEnabled(true) .setAggregationOperatorUnspillMemoryLimit(DataSize.valueOf("100MB")) .setSpillerSpillPaths(ImmutableList.of("/tmp/custom/spill/path1", "/tmp/custom/spill/path2")) - .setSpillerThreads(42) + .setSpillerThreads("42") .setSpillMaxUsedSpaceThreshold(0.8) .setMemoryRevokingThreshold(0.2) .setMemoryRevokingTarget(0.8) diff --git a/core/trino-main/src/test/java/io/trino/sql/query/TestInlineFunctions.java b/core/trino-main/src/test/java/io/trino/sql/query/TestInlineFunctions.java index f0d0f1160ddb..fc001354a883 100644 --- a/core/trino-main/src/test/java/io/trino/sql/query/TestInlineFunctions.java +++ b/core/trino-main/src/test/java/io/trino/sql/query/TestInlineFunctions.java @@ -276,6 +276,160 @@ WITH FUNCTION twice(x bigint) .hasMessage("line 1:6: Invalid function 'twice': Invalid definition: oops"); } + @Test + public void testInlineSqlFunctions() + { + assertThat(assertions.query( + """ + WITH FUNCTION abc(x integer) RETURNS integer RETURN x * 2 + SELECT abc(21) + """)) + .matches("VALUES 42"); + assertThat(assertions.query( + """ + WITH FUNCTION abc(x integer) RETURNS integer RETURN abs(x) + SELECT abc(-21) + """)) + .matches("VALUES 21"); + + 
assertThat(assertions.query( + """ + WITH + FUNCTION abc(x integer) RETURNS integer RETURN x * 2, + FUNCTION xyz(x integer) RETURNS integer RETURN abc(x) + 1 + SELECT xyz(21) + """)) + .matches("VALUES 43"); + + assertThat(assertions.query( + """ + WITH + FUNCTION my_pow(n int, p int) + RETURNS int + BEGIN + DECLARE r int DEFAULT n; + top: LOOP + IF p <= 1 THEN + LEAVE top; + END IF; + SET r = r * n; + SET p = p - 1; + END LOOP; + RETURN r; + END + SELECT my_pow(2, 8) + """)) + .matches("VALUES 256"); + + assertThat(assertions.query( + """ + WITH + FUNCTION fun_with_uppercase_var() + RETURNS int + BEGIN + DECLARE R int DEFAULT 7; + RETURN R; + END + SELECT fun_with_uppercase_var() + """)) + .matches("VALUES 7"); + + // invoke function on data from connector to prevent constant folding on the coordinator + assertThat(assertions.query( + """ + WITH + FUNCTION my_pow(n int, p int) + RETURNS int + BEGIN + DECLARE r int DEFAULT n; + top: LOOP + IF p <= 1 THEN + LEAVE top; + END IF; + SET r = r * n; + SET p = p - 1; + END LOOP; + RETURN r; + END + SELECT my_pow(CAST(nationkey AS integer), CAST(regionkey AS integer)) FROM nation WHERE nationkey IN (1,2,3,5,8) + """)) + .matches("VALUES 1, 2, 3, 5, 64"); + + // function with dereference + assertThat(assertions.query( + """ + WITH FUNCTION get(input row(varchar)) + RETURNS varchar + RETURN input[1] + SELECT get(ROW('abc')) + """)) + .matches("VALUES VARCHAR 'abc'"); + + // validations for inline functions + assertThat(assertions.query("WITH FUNCTION a.b() RETURNS int RETURN 42 SELECT a.b()")) + .failure() + .hasMessageContaining("line 1:6: Inline function names cannot be qualified: a.b"); + + assertThat(assertions.query("WITH FUNCTION x() RETURNS int SECURITY INVOKER RETURN 42 SELECT x()")) + .failure() + .hasMessageContaining("line 1:31: Security mode not supported for inline functions"); + + assertThat(assertions.query("WITH FUNCTION x() RETURNS bigint SECURITY DEFINER RETURN 42 SELECT x()")) + .failure() + 
.hasMessageContaining("line 1:34: Security mode not supported for inline functions"); + + // error location reporting + assertThat(assertions.query("WITH function x() RETURNS bigint DETERMINISTIC DETERMINISTIC RETURN 42 SELECT x()")) + .failure() + .hasMessageContaining("line 1:48: Multiple deterministic clauses specified"); + + // Verify the current restrictions on inline functions are enforced + + // inline function can mask a global function + assertThat(assertions.query( + """ + WITH FUNCTION abs(x integer) RETURNS integer RETURN x * 2 + SELECT abs(-10) + """)) + .matches("VALUES -20"); + assertThat(assertions.query( + """ + WITH + FUNCTION abs(x integer) RETURNS integer RETURN x * 2, + FUNCTION wrap_abs(x integer) RETURNS integer RETURN abs(x) + SELECT wrap_abs(-10) + """)) + .matches("VALUES -20"); + + // inline function can have the same name as a global function with a different signature + assertThat(assertions.query( + """ + WITH FUNCTION abs(x varchar) RETURNS varchar RETURN reverse(x) + SELECT abs('abc') + """)) + .skippingTypesCheck() + .matches("VALUES 'cba'"); + + // inline functions must be declared before they are used + assertThat(assertions.query( + """ + WITH + FUNCTION a(x integer) RETURNS integer RETURN b(x), + FUNCTION b(x integer) RETURNS integer RETURN x * 2 + SELECT a(10) + """)) + .failure().hasMessage("line 2:48: Function 'b' not registered"); + + // inline function cannot be recursive + // note: mutual recursion is not supported either, but it is not tested due to the forward declaration limitation above + assertThat(assertions.query( + """ + WITH FUNCTION a(x integer) RETURNS integer RETURN a(x) + SELECT a(10) + """)) + .failure().hasMessage("line 1:6: Recursive language functions are not supported: a(integer):integer"); + } + public static class TestingLanguageEnginePlugin implements Plugin { diff --git a/core/trino-main/src/test/java/io/trino/sql/query/TestInlineSession.java 
b/core/trino-main/src/test/java/io/trino/sql/query/TestInlineSession.java new file mode 100644 index 000000000000..21b5e90d039b --- /dev/null +++ b/core/trino-main/src/test/java/io/trino/sql/query/TestInlineSession.java @@ -0,0 +1,167 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.sql.query; + +import com.google.common.collect.ImmutableMap; +import io.trino.Session; +import io.trino.connector.MockConnectorFactory; +import io.trino.connector.MockConnectorPlugin; +import io.trino.plugin.tpch.TpchPlugin; +import io.trino.testing.QueryRunner; +import io.trino.testing.StandaloneQueryRunner; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.parallel.Execution; + +import java.util.Map; + +import static io.trino.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME; +import static io.trino.spi.StandardErrorCode.INVALID_SESSION_PROPERTY; +import static io.trino.spi.session.PropertyMetadata.stringProperty; +import static io.trino.testing.TestingHandles.TEST_CATALOG_NAME; +import static io.trino.testing.TestingSession.testSessionBuilder; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; +import static org.junit.jupiter.api.parallel.ExecutionMode.CONCURRENT; + +@TestInstance(PER_CLASS) +@Execution(CONCURRENT) +public class TestInlineSession +{ + private static final String 
MOCK_CATALOG = "mock"; + + private final QueryAssertions assertions; + + public TestInlineSession() + { + Session session = testSessionBuilder() + .setCatalog(TEST_CATALOG_NAME) + .setSchema(TINY_SCHEMA_NAME) + .build(); + + QueryRunner runner = new StandaloneQueryRunner(session); + MockConnectorPlugin mockConnectorPlugin = new MockConnectorPlugin(MockConnectorFactory.builder() + .withSessionProperty(stringProperty("catalog_property", "Test catalog property", "", false)) + .build()); + + runner.installPlugin(mockConnectorPlugin); + runner.createCatalog(MOCK_CATALOG, "mock", ImmutableMap.of()); + runner.installPlugin(new TpchPlugin()); + runner.createCatalog(TEST_CATALOG_NAME, "tpch", ImmutableMap.of("tpch.splits-per-node", "1")); + runner.installPlugin(new TestInlineFunctions.TestingLanguageEnginePlugin()); + + assertions = new QueryAssertions(runner); + } + + @AfterAll + public void teardown() + { + assertions.close(); + } + + @Test + public void testInlineSession() + { + assertThat(assertions.query("WITH SESSION time_zone_id = 'Europe/Wonderland' SELECT current_timezone()")) + .failure() + .hasMessageContaining("Time zone not supported: Europe/Wonderland"); + + assertThat(assertions.query("WITH SESSION time_zone_id = 'Europe/Warsaw' SELECT current_timezone()")) + .matches("VALUES CAST('Europe/Warsaw' AS varchar)"); + + Session session = assertions.sessionBuilder() + .setSystemProperty("time_zone_id", "America/Los_Angeles") + .build(); + + assertThat(assertions.query(session, "WITH SESSION time_zone_id = 'Europe/Warsaw' SELECT current_timezone()")) + .matches("VALUES CAST('Europe/Warsaw' AS varchar)"); + } + + @Test + public void testInlineSessionAndSqlFunctions() + { + assertThat(assertions.query(""" + WITH + SESSION time_zone_id = 'Europe/Warsaw' + WITH + FUNCTION foo() RETURNS varchar RETURN current_timezone() + SELECT foo() + """)) + .matches("VALUES CAST('Europe/Warsaw' AS varchar)"); + } + + @Test + void testValidSystemSessionProperties() + { + 
assertThat(assertions.execute("WITH SESSION query_max_total_memory = '10GB' SELECT 1").getSession().getSystemProperties()) + .isEqualTo(Map.of("query_max_total_memory", "10GB")); + + assertThat(assertions.execute("WITH SESSION query_max_total_memory = CAST('10GB' AS VARCHAR) SELECT 1").getSession().getSystemProperties()) + .isEqualTo(Map.of("query_max_total_memory", "10GB")); + } + + @Test + void testValidCatalogSessionProperty() + { + assertThat(assertions.execute("WITH SESSION mock.catalog_property = 'true' SELECT 1").getSession().getCatalogProperties("mock")) + .isEqualTo(Map.of("catalog_property", "true")); + + assertThat(assertions.execute("WITH SESSION mock.catalog_property = CAST(true AS varchar) SELECT 1").getSession().getCatalogProperties("mock")) + .isEqualTo(Map.of("catalog_property", "true")); + } + + @Test + void testInvalidSessionProperty() + { + assertThat(assertions.query("WITH SESSION test.schema.invalid_key = 'invalid_value' SELECT 1")) + .failure() + .hasMessageContaining("line 1:14: Invalid session property 'test.schema.invalid_key'"); + } + + @Test + void testInvalidSystemSessionProperties() + { + assertThat(assertions.query("WITH SESSION invalid_key = 'invalid_value' SELECT 1")) + .failure() + .hasErrorCode(INVALID_SESSION_PROPERTY) + .hasMessageContaining("line 1:28: Session property invalid_key does not exist"); + + assertThat(assertions.query("WITH SESSION query_max_total_memory = 'invalid_value' SELECT 1")) + .failure() + .hasErrorCode(INVALID_SESSION_PROPERTY) + .hasMessageContaining("line 1:39: size is not a valid data size string: invalid_value"); + + assertThat(assertions.query("WITH SESSION query_max_total_memory = '10GB', query_max_total_memory = '16GB' SELECT 1")) + .failure() + .hasErrorCode(INVALID_SESSION_PROPERTY) + .hasMessageContaining("line 1:47: Session property query_max_total_memory already set"); + } + + @Test + void testInvalidCatalogSessionProperties() + { + assertThat(assertions.query("WITH SESSION mock.invalid_key = 
'invalid_value' SELECT 1")) + .failure() + .hasMessageContaining("line 1:33: Session property mock.invalid_key does not exist"); + + assertThat(assertions.query("WITH SESSION mock.catalog_property = true SELECT 1")) + .failure() + .hasMessageContaining("Unable to set session property 'mock.catalog_property' to 'true': Cannot cast type boolean to varchar"); + + assertThat(assertions.query("WITH SESSION mock.catalog_property = 'true', mock.catalog_property = 'false' SELECT 1")) + .failure() + .hasMessageContaining("line 1:46: Session property mock.catalog_property already set"); + } +} diff --git a/core/trino-parser/pom.xml b/core/trino-parser/pom.xml index 860e84fa9b1a..bc2ad96b9465 100644 --- a/core/trino-parser/pom.xml +++ b/core/trino-parser/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-parser/src/main/java/io/trino/sql/QueryUtil.java b/core/trino-parser/src/main/java/io/trino/sql/QueryUtil.java index 2e8512a44f2a..b31276c42e0e 100644 --- a/core/trino-parser/src/main/java/io/trino/sql/QueryUtil.java +++ b/core/trino-parser/src/main/java/io/trino/sql/QueryUtil.java @@ -263,6 +263,7 @@ public static Query singleValueQuery(String columnName, boolean value) public static Query query(QueryBody body) { return new Query( + ImmutableList.of(), ImmutableList.of(), Optional.empty(), body, diff --git a/core/trino-parser/src/main/java/io/trino/sql/SqlFormatter.java b/core/trino-parser/src/main/java/io/trino/sql/SqlFormatter.java index 63c198cf3ab1..4b860d5eb9c2 100644 --- a/core/trino-parser/src/main/java/io/trino/sql/SqlFormatter.java +++ b/core/trino-parser/src/main/java/io/trino/sql/SqlFormatter.java @@ -139,6 +139,7 @@ import io.trino.sql.tree.SecurityCharacteristic; import io.trino.sql.tree.Select; import io.trino.sql.tree.SelectItem; +import io.trino.sql.tree.SessionProperty; import io.trino.sql.tree.SetColumnType; import io.trino.sql.tree.SetPath; import io.trino.sql.tree.SetProperties; @@ -643,6 
+644,17 @@ protected Void visitDescribeInput(DescribeInput node, Integer indent) @Override protected Void visitQuery(Query node, Integer indent) { + if (!node.getSessionProperties().isEmpty()) { + builder.append("WITH SESSION\n"); + Iterator sessionProperties = node.getSessionProperties().iterator(); + while (sessionProperties.hasNext()) { + process(sessionProperties.next(), indent + 1); + if (sessionProperties.hasNext()) { + builder.append(','); + } + builder.append('\n'); + } + } if (!node.getFunctions().isEmpty()) { builder.append("WITH\n"); Iterator functions = node.getFunctions().iterator(); @@ -2315,6 +2327,15 @@ protected Void visitFunctionSpecification(FunctionSpecification node, Integer in return null; } + @Override + protected Void visitSessionProperty(SessionProperty node, Integer indent) + { + append(indent, formatName(node.getName())) + .append(" = ") + .append(formatExpression(node.getValue())); + return null; + } + @Override protected Void visitParameterDeclaration(ParameterDeclaration node, Integer indent) { diff --git a/core/trino-parser/src/main/java/io/trino/sql/parser/AstBuilder.java b/core/trino-parser/src/main/java/io/trino/sql/parser/AstBuilder.java index 1095a9c728cd..15f8d6dfa1ea 100644 --- a/core/trino-parser/src/main/java/io/trino/sql/parser/AstBuilder.java +++ b/core/trino-parser/src/main/java/io/trino/sql/parser/AstBuilder.java @@ -240,6 +240,7 @@ import io.trino.sql.tree.SecurityCharacteristic; import io.trino.sql.tree.Select; import io.trino.sql.tree.SelectItem; +import io.trino.sql.tree.SessionProperty; import io.trino.sql.tree.SetColumnType; import io.trino.sql.tree.SetPath; import io.trino.sql.tree.SetProperties; @@ -1120,8 +1121,10 @@ public Node visitRootQuery(SqlBaseParser.RootQueryContext context) return new Query( getLocation(context), - Optional.ofNullable(context.withFunction()) - .map(SqlBaseParser.WithFunctionContext::functionSpecification) + Optional.ofNullable(context.sessionProperty()) + .map(contexts -> visit(contexts, 
SessionProperty.class)) + .orElseGet(ImmutableList::of), + Optional.ofNullable(context.functionSpecification()) .map(contexts -> visit(contexts, FunctionSpecification.class)) .orElseGet(ImmutableList::of), query.getWith(), @@ -1139,6 +1142,7 @@ public Node visitQuery(SqlBaseParser.QueryContext context) return new Query( getLocation(context), ImmutableList.of(), + ImmutableList.of(), visitIfPresent(context.with(), With.class), body.getQueryBody(), body.getOrderBy(), @@ -1234,6 +1238,7 @@ else if (context.limit.rowCount().INTEGER_VALUE() != null) { return new Query( getLocation(context), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( getLocation(context), @@ -1254,6 +1259,7 @@ else if (context.limit.rowCount().INTEGER_VALUE() != null) { return new Query( getLocation(context), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), term, orderBy, @@ -3773,6 +3779,15 @@ public Node visitFunctionDefinition(SqlBaseParser.FunctionDefinitionContext cont return new StringLiteral(getLocation(context), value); } + @Override + public Node visitSessionProperty(SqlBaseParser.SessionPropertyContext context) + { + return new SessionProperty( + getLocation(context), + getQualifiedName(context.qualifiedName()), + (Expression) visit(context.expression())); + } + @Override public Node visitParameterDeclaration(SqlBaseParser.ParameterDeclarationContext context) { diff --git a/core/trino-parser/src/main/java/io/trino/sql/tree/AstVisitor.java b/core/trino-parser/src/main/java/io/trino/sql/tree/AstVisitor.java index 9165b3480b45..1da84c288c6f 100644 --- a/core/trino-parser/src/main/java/io/trino/sql/tree/AstVisitor.java +++ b/core/trino-parser/src/main/java/io/trino/sql/tree/AstVisitor.java @@ -1237,6 +1237,11 @@ protected R visitFunctionSpecification(FunctionSpecification node, C context) return visitNode(node, context); } + protected R visitSessionProperty(SessionProperty node, C context) + { + return visitNode(node, context); + } + protected R 
visitParameterDeclaration(ParameterDeclaration node, C context) { return visitNode(node, context); diff --git a/core/trino-parser/src/main/java/io/trino/sql/tree/Query.java b/core/trino-parser/src/main/java/io/trino/sql/tree/Query.java index a803ee25f3e3..87efcce2bfef 100644 --- a/core/trino-parser/src/main/java/io/trino/sql/tree/Query.java +++ b/core/trino-parser/src/main/java/io/trino/sql/tree/Query.java @@ -26,6 +26,7 @@ public class Query extends Statement { + private final List sessionProperties; private final List functions; private final Optional with; private final QueryBody queryBody; @@ -35,6 +36,7 @@ public class Query @Deprecated public Query( + List sessionProperties, List functions, Optional with, QueryBody queryBody, @@ -42,11 +44,12 @@ public Query( Optional offset, Optional limit) { - this(Optional.empty(), functions, with, queryBody, orderBy, offset, limit); + this(Optional.empty(), sessionProperties, functions, with, queryBody, orderBy, offset, limit); } public Query( NodeLocation location, + List sessionProperties, List functions, Optional with, QueryBody queryBody, @@ -54,11 +57,12 @@ public Query( Optional offset, Optional limit) { - this(Optional.of(location), functions, with, queryBody, orderBy, offset, limit); + this(Optional.of(location), sessionProperties, functions, with, queryBody, orderBy, offset, limit); } private Query( Optional location, + List sessionProperties, List functions, Optional with, QueryBody queryBody, @@ -67,7 +71,8 @@ private Query( Optional limit) { super(location); - requireNonNull(functions, "function si snull"); + requireNonNull(sessionProperties, "sessionProperties is null"); + requireNonNull(functions, "functions is null"); requireNonNull(with, "with is null"); requireNonNull(queryBody, "queryBody is null"); requireNonNull(orderBy, "orderBy is null"); @@ -75,6 +80,7 @@ private Query( requireNonNull(limit, "limit is null"); checkArgument(!limit.isPresent() || limit.get() instanceof FetchFirst || limit.get() 
instanceof Limit, "limit must be optional of either FetchFirst or Limit type"); + this.sessionProperties = ImmutableList.copyOf(sessionProperties); this.functions = ImmutableList.copyOf(functions); this.with = with; this.queryBody = queryBody; @@ -83,6 +89,11 @@ private Query( this.limit = limit; } + public List getSessionProperties() + { + return sessionProperties; + } + public List getFunctions() { return functions; @@ -124,6 +135,7 @@ public List getChildren() { ImmutableList.Builder nodes = ImmutableList.builder(); nodes.addAll(functions); + nodes.addAll(sessionProperties); with.ifPresent(nodes::add); nodes.add(queryBody); orderBy.ifPresent(nodes::add); @@ -136,6 +148,7 @@ public List getChildren() public String toString() { return toStringHelper(this) + .add("sessionProperties", sessionProperties.isEmpty() ? null : sessionProperties) .add("functions", functions.isEmpty() ? null : functions) .add("with", with.orElse(null)) .add("queryBody", queryBody) @@ -156,7 +169,8 @@ public boolean equals(Object obj) return false; } Query o = (Query) obj; - return Objects.equals(functions, o.functions) && + return Objects.equals(sessionProperties, o.sessionProperties) && + Objects.equals(functions, o.functions) && Objects.equals(with, o.with) && Objects.equals(queryBody, o.queryBody) && Objects.equals(orderBy, o.orderBy) && @@ -167,7 +181,7 @@ public boolean equals(Object obj) @Override public int hashCode() { - return Objects.hash(functions, with, queryBody, orderBy, offset, limit); + return Objects.hash(sessionProperties, functions, with, queryBody, orderBy, offset, limit); } @Override diff --git a/core/trino-parser/src/main/java/io/trino/sql/tree/SessionProperty.java b/core/trino-parser/src/main/java/io/trino/sql/tree/SessionProperty.java new file mode 100644 index 000000000000..773106e25b83 --- /dev/null +++ b/core/trino-parser/src/main/java/io/trino/sql/tree/SessionProperty.java @@ -0,0 +1,81 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + 
* you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.sql.tree; + +import com.google.common.collect.ImmutableList; + +import java.util.List; +import java.util.Objects; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class SessionProperty + extends Node +{ + private final QualifiedName name; + private final Expression value; + + public SessionProperty(NodeLocation location, QualifiedName name, Expression value) + { + super(location); + this.name = requireNonNull(name, "name is null"); + this.value = requireNonNull(value, "value is null"); + } + + public QualifiedName getName() + { + return name; + } + + public Expression getValue() + { + return value; + } + + @Override + public List getChildren() + { + return ImmutableList.of(value); + } + + @Override + public R accept(AstVisitor visitor, C context) + { + return visitor.visitSessionProperty(this, context); + } + + @Override + public int hashCode() + { + return Objects.hash(name, value); + } + + @Override + public boolean equals(Object obj) + { + return (obj instanceof SessionProperty other) && + Objects.equals(name, other.name) && + Objects.equals(value, other.value); + } + + @Override + public String toString() + { + return toStringHelper(this) + .add("name", name) + .add("value", value) + .toString(); + } +} diff --git a/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParser.java b/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParser.java index 
0ee994342e56..b3c35ef28ab0 100644 --- a/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParser.java +++ b/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParser.java @@ -79,6 +79,7 @@ import io.trino.sql.tree.FrameBound; import io.trino.sql.tree.FunctionCall; import io.trino.sql.tree.FunctionCall.NullTreatment; +import io.trino.sql.tree.FunctionSpecification; import io.trino.sql.tree.GenericDataType; import io.trino.sql.tree.GenericLiteral; import io.trino.sql.tree.Grant; @@ -165,6 +166,8 @@ import io.trino.sql.tree.RenameView; import io.trino.sql.tree.ResetSession; import io.trino.sql.tree.ResetSessionAuthorization; +import io.trino.sql.tree.ReturnStatement; +import io.trino.sql.tree.ReturnsClause; import io.trino.sql.tree.Revoke; import io.trino.sql.tree.RevokeRoles; import io.trino.sql.tree.Rollback; @@ -172,6 +175,7 @@ import io.trino.sql.tree.SearchedCaseExpression; import io.trino.sql.tree.Select; import io.trino.sql.tree.SelectItem; +import io.trino.sql.tree.SessionProperty; import io.trino.sql.tree.SetColumnType; import io.trino.sql.tree.SetPath; import io.trino.sql.tree.SetProperties; @@ -2175,6 +2179,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 14), "foo"), new Query( location(1, 21), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 21), @@ -2200,6 +2205,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 14), "foo"), new Query( location(1, 24), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 24), @@ -2225,6 +2231,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 14), "foo"), new Query( location(1, 26), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 26), @@ -2254,6 +2261,7 @@ public 
void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 25), "foo"), new Query( location(1, 32), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 32), @@ -2279,6 +2287,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 25), "foo"), new Query( location(1, 35), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 35), @@ -2304,6 +2313,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 25), "foo"), new Query( location(1, 37), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 37), @@ -2333,6 +2343,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 28), "foo"), new Query( location(1, 35), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 35), @@ -2358,6 +2369,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 28), "foo"), new Query( location(1, 38), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 38), @@ -2383,6 +2395,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 28), "foo"), new Query( location(1, 40), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 40), @@ -2412,6 +2425,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 14), "foo"), new Query( location(1, 21), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 21), @@ -2437,6 +2451,7 @@ public void testCreateTableAsSelect() .isEqualTo(new 
CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 14), "foo"), new Query( location(1, 24), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 24), @@ -2462,6 +2477,7 @@ public void testCreateTableAsSelect() .isEqualTo(new CreateTableAsSelect(location(1, 1), qualifiedName(location(1, 14), "foo"), new Query( location(1, 26), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 26), @@ -2500,6 +2516,7 @@ public void testCreateTableAsSelect() new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2544,6 +2561,7 @@ CREATE TABLE foo(x) new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2585,6 +2603,7 @@ CREATE TABLE foo(x,y) new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2627,6 +2646,7 @@ CREATE TABLE foo(x,y) new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2672,6 +2692,7 @@ CREATE TABLE foo(x) new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2714,6 +2735,7 @@ CREATE TABLE foo(x,y) new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2756,6 +2778,7 @@ CREATE TABLE foo(x,y) new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2801,6 +2824,7 @@ CREATE TABLE foo(x) COMMENT 'test' new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2843,6 +2867,7 @@ CREATE TABLE foo(x,y) COMMENT 'test' new Query( location(4, 1), ImmutableList.of(), + 
ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2885,6 +2910,7 @@ CREATE TABLE foo(x,y) COMMENT 'test' new Query( location(4, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(4, 1), @@ -2953,6 +2979,7 @@ WITH t(x) AS (VALUES 1) QualifiedName table = QualifiedName.of("foo"); Query query = new Query( + ImmutableList.of(), ImmutableList.of(), Optional.of(new With(false, ImmutableList.of( new WithQuery( @@ -4135,6 +4162,7 @@ public void testWith() { assertStatement("WITH a (t, u) AS (SELECT * FROM x), b AS (SELECT * FROM y) TABLE z", new Query( + ImmutableList.of(), ImmutableList.of(), Optional.of(new With(false, ImmutableList.of( new WithQuery( @@ -4156,6 +4184,7 @@ public void testWith() assertStatement("WITH RECURSIVE a AS (SELECT * FROM x) TABLE y", new Query( + ImmutableList.of(), ImmutableList.of(), Optional.of(new With(true, ImmutableList.of( new WithQuery( @@ -4715,6 +4744,7 @@ public void testShowStatsForQuery() new Query( location(1, 17), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 17), @@ -4745,6 +4775,7 @@ public void testShowStatsForQuery() new Query( location(1, 17), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 17), @@ -4781,6 +4812,7 @@ WITH t AS (SELECT 1 ) new Query( location(2, 4), ImmutableList.of(), + ImmutableList.of(), Optional.of( new With( location(2, 4), @@ -4792,6 +4824,7 @@ WITH t AS (SELECT 1 ) new Query( location(2, 15), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(2, 15), @@ -4891,6 +4924,7 @@ public void testQuantifiedComparison() new SubqueryExpression(location(1, 13), new Query( location(1, 13), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 13), @@ -4915,6 +4949,7 @@ public void testQuantifiedComparison() new SubqueryExpression(location(1, 13), new Query( 
location(1, 13), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new Values(location(1, 13), ImmutableList.of( new Row(location(1, 20), ImmutableList.of(new LongLiteral(location(1, 24), "1"))), @@ -4931,6 +4966,7 @@ public void testQuantifiedComparison() new SubqueryExpression(location(1, 15), new Query( location(1, 15), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 15), @@ -5253,6 +5289,7 @@ public void testCreateMaterializedView() new Query( new NodeLocation(1, 31), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( new NodeLocation(1, 31), @@ -5291,6 +5328,7 @@ public void testCreateMaterializedView() new Query( new NodeLocation(1, 100), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( new NodeLocation(1, 100), @@ -5328,6 +5366,7 @@ public void testCreateMaterializedView() new Query( new NodeLocation(1, 61), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( new NodeLocation(1, 61), @@ -5370,6 +5409,7 @@ public void testCreateMaterializedView() new Query( new NodeLocation(3, 5), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( new NodeLocation(3, 5), @@ -5420,6 +5460,7 @@ AS WITH a (t, u) AS (SELECT * FROM x), b AS (SELECT * FROM a) TABLE b new Query( new NodeLocation(3, 5), ImmutableList.of(), + ImmutableList.of(), Optional.of(new With( new NodeLocation(3, 5), false, @@ -5430,6 +5471,7 @@ AS WITH a (t, u) AS (SELECT * FROM x), b AS (SELECT * FROM a) TABLE b new Query( new NodeLocation(3, 23), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( new NodeLocation(3, 23), @@ -5459,6 +5501,7 @@ AS WITH a (t, u) AS (SELECT * FROM x), b AS (SELECT * FROM a) TABLE b new Query( new NodeLocation(3, 47), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( new NodeLocation(3, 47), @@ -5941,6 +5984,7 @@ public void 
testQueryPeriod() new Query( location(1, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 1), @@ -5972,6 +6016,7 @@ public void testQueryPeriod() new Query( location(1, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 1), @@ -6340,6 +6385,7 @@ private static Query selectAllFrom(Relation relation) return new Query( location(1, 1), ImmutableList.of(), + ImmutableList.of(), Optional.empty(), new QuerySpecification( location(1, 1), @@ -6525,6 +6571,109 @@ public void testJsonQuery() JsonQuery.EmptyOrErrorBehavior.ERROR)); } + @Test + public void testSessionProperty() + { + assertThat(statement(""" + WITH SESSION + key = 'value', + catalog.key2 = DECIMAL '10.0' + SELECT 1 + """)) + .isEqualTo(new Query( + location(1, 1), + ImmutableList.of( + new SessionProperty( + location(2, 4), + QualifiedName.of(ImmutableList.of(new Identifier(location(2, 4), "key", false))), + new StringLiteral(location(2, 10), "value")), + new SessionProperty( + location(3, 4), + QualifiedName.of(ImmutableList.of(new Identifier(location(3, 4), "catalog", false), new Identifier(location(3, 12), "key2", false))), + new DecimalLiteral(location(3, 19), "10.0"))), + ImmutableList.of(), + Optional.empty(), + new QuerySpecification( + location(4, 1), + new Select(location(4, 1), false, ImmutableList.of(new SingleColumn(location(4, 8), new LongLiteral(location(4, 8),"1"), Optional.empty()))), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty(), + ImmutableList.of(), + Optional.empty(), + Optional.empty(), + Optional.empty()), + Optional.empty(), + Optional.empty(), + Optional.empty() + )); + } + + @Test + public void testInvalidQueryScoped() + { + // Double WITH + assertStatementIsInvalid("WITH WITH SESSION query_max_memory = '1GB' SELECT 1") + .withMessage("line 1:6: mismatched input 'WITH'. 
Expecting: 'FUNCTION', 'RECURSIVE', 'SESSION', "); + + // Session after function + assertStatementIsInvalid("WITH FUNCTION abc() RETURNS int RETURN 42, SESSION query_max_memory = '1GB' SELECT 1") + .withMessage("line 1:44: mismatched input 'SESSION'. Expecting: 'FUNCTION'"); + + // Session after function + assertStatementIsInvalid("WITH SESSION query_max_memory = '1GB', FUNCTION abc() RETURNS int RETURN 42, SESSION query_max_total_memory = '1GB' SELECT 1") + .withMessage("line 1:49: mismatched input 'abc'. Expecting: '.', '='"); + + // Repeated WITH SESSION + assertStatementIsInvalid("WITH SESSION query_max_memory = '1GB', WITH SESSION query_max_total_memory = '1GB' SELECT 1") + .withMessage("line 1:40: mismatched input 'WITH'. Expecting: "); + } + + @Test + public void testWithSessionAndFunction() + { + assertThat(statement(""" + WITH SESSION + key = 'value' + WITH + FUNCTION foo() + RETURNS bigint + RETURN 42 + SELECT 1""")) + .isEqualTo(new Query( + location(1, 1), + ImmutableList.of( + new SessionProperty( + location(2, 4), + QualifiedName.of(ImmutableList.of(new Identifier(location(2, 4), "key", false))), + new StringLiteral(location(2, 10), "value"))), + ImmutableList.of(new FunctionSpecification( + location(4, 4), + QualifiedName.of(ImmutableList.of(new Identifier(location(4, 13), "foo", false))), + ImmutableList.of(), + new ReturnsClause(location(5, 4), simpleType(location(5, 12), "bigint")), + ImmutableList.of(), + Optional.of(new ReturnStatement(location(6, 4), new LongLiteral(location(6, 11), "42"))), + Optional.empty())), + Optional.empty(), + new QuerySpecification( + location(7, 1), + new Select(location(7, 1), false, ImmutableList.of(new SingleColumn(location(7, 8), new LongLiteral(location(7, 8),"1"), Optional.empty()))), + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.empty(), + ImmutableList.of(), + Optional.empty(), + Optional.empty(), + Optional.empty()), + Optional.empty(), + Optional.empty(), + Optional.empty() + )); + } 
+ @Test public void testJsonObject() { diff --git a/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParserRoutines.java b/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParserRoutines.java index 861b60efe139..d1ee60553ec1 100644 --- a/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParserRoutines.java +++ b/core/trino-parser/src/test/java/io/trino/sql/parser/TestSqlParserRoutines.java @@ -353,6 +353,7 @@ private static ElseIfClause elseIf(Expression expression, ControlStatement... st private static Query query(FunctionSpecification function, Select select) { return new Query( + ImmutableList.of(), ImmutableList.of(function), Optional.empty(), new QuerySpecification( diff --git a/core/trino-server-main/pom.xml b/core/trino-server-main/pom.xml index fc8d7c68d535..e40fd2975f20 100644 --- a/core/trino-server-main/pom.xml +++ b/core/trino-server-main/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-server-rpm/pom.xml b/core/trino-server-rpm/pom.xml index 750de003497a..50d4deef8da0 100644 --- a/core/trino-server-rpm/pom.xml +++ b/core/trino-server-rpm/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-server/pom.xml b/core/trino-server/pom.xml index 6ec9e77e8ad2..ed85f52a348c 100644 --- a/core/trino-server/pom.xml +++ b/core/trino-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-spi/pom.xml b/core/trino-spi/pom.xml index a7229430aca1..c743fda6b90d 100644 --- a/core/trino-spi/pom.xml +++ b/core/trino-spi/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/core/trino-web-ui/pom.xml b/core/trino-web-ui/pom.xml index 18908dca535c..5961f483d9e4 100644 --- a/core/trino-web-ui/pom.xml +++ b/core/trino-web-ui/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/core/trino-web-ui/src/main/resources/webapp-preview/package-lock.json b/core/trino-web-ui/src/main/resources/webapp-preview/package-lock.json index 4fe9a414822f..2d206d144bdd 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/package-lock.json +++ b/core/trino-web-ui/src/main/resources/webapp-preview/package-lock.json @@ -8,38 +8,40 @@ "name": "webapp-preview", "version": "0.0.0", "dependencies": { - "@emotion/react": "^11.13.3", - "@emotion/styled": "^11.13.0", - "@fontsource/roboto": "^5.1.0", - "@mui/icons-material": "^6.1.7", - "@mui/material": "^6.1.7", - "@mui/x-charts": "^7.22.2", - "axios": "^1.7.7", + "@emotion/react": "^11.14.0", + "@emotion/styled": "^11.14.0", + "@fontsource/roboto": "^5.1.1", + "@mui/icons-material": "^6.4.2", + "@mui/material": "^6.4.2", + "@mui/x-charts": "^7.25.0", + "axios": "^1.7.9", + "lodash": "^4.17.21", "react": "^18.3.1", "react-dom": "^18.3.1", - "react-router-dom": "^6.28.0", + "react-router-dom": "^7.1.5", "react-syntax-highlighter": "^15.6.1", - "sass": "^1.81.0", - "zustand": "^5.0.1" + "sass": "^1.83.4", + "zustand": "^5.0.3" }, "devDependencies": { "@eslint/js": "^9.14.0", "@types/eslint__js": "^8.42.3", - "@types/react": "^18.3.12", - "@types/react-dom": "^18.3.1", + "@types/lodash": "^4.17.15", + "@types/react": "^18.3.18", + "@types/react-dom": "^18.3.5", "@types/react-syntax-highlighter": "^15.5.13", "@typescript-eslint/eslint-plugin": "^8.14.0", "@typescript-eslint/parser": "^8.14.0", - "@vitejs/plugin-react": "^4.3.3", - "eslint": "^9.15.0", - "eslint-plugin-react": "^7.37.2", - "eslint-plugin-react-hooks": "^5.0.0", - "eslint-plugin-react-refresh": "^0.4.14", - "globals": "^15.12.0", - "prettier": "^3.3.3", - "typescript": "^5.6.3", - "typescript-eslint": "^8.14.0", - "vite": "^5.4.11" + "@vitejs/plugin-react": "^4.3.4", + "eslint": "^9.19.0", + "eslint-plugin-react": "^7.37.4", + "eslint-plugin-react-hooks": "^5.1.0", + "eslint-plugin-react-refresh": "^0.4.18", + "globals": "^15.14.0", + 
"prettier": "^3.4.2", + "typescript": "^5.7.3", + "typescript-eslint": "^8.22.0", + "vite": "^6.0.11" } }, "node_modules/@ampproject/remapping": { @@ -71,9 +73,9 @@ } }, "node_modules/@babel/compat-data": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.3.tgz", - "integrity": "sha512-nHIxvKPniQXpmQLb0vhY3VaFb3S0YrTAwpOWJZh1wn3oJPjJk9Asva204PsBdmAE8vpzfHudT8DB0scYvy9q0g==", + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.26.5.tgz", + "integrity": "sha512-XvcZi1KWf88RVbF9wn8MN6tYFloU5qX8KjuF3E1PVBmJ9eypXfs4GRiJwLuTZL0iSnJUKn1BFPa5BPZZJyFzPg==", "dev": true, "license": "MIT", "engines": { @@ -81,22 +83,22 @@ } }, "node_modules/@babel/core": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz", - "integrity": "sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg==", + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.26.7.tgz", + "integrity": "sha512-SRijHmF0PSPgLIBYlWnG0hyeJLwXE2CgpsXaMOrtt2yp9/86ALw6oUlj9KYuZ0JN07T4eBMVIW4li/9S1j2BGA==", "dev": true, "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.26.0", - "@babel/generator": "^7.26.0", - "@babel/helper-compilation-targets": "^7.25.9", + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.5", + "@babel/helper-compilation-targets": "^7.26.5", "@babel/helper-module-transforms": "^7.26.0", - "@babel/helpers": "^7.26.0", - "@babel/parser": "^7.26.0", + "@babel/helpers": "^7.26.7", + "@babel/parser": "^7.26.7", "@babel/template": "^7.25.9", - "@babel/traverse": "^7.25.9", - "@babel/types": "^7.26.0", + "@babel/traverse": "^7.26.7", + "@babel/types": "^7.26.7", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -129,13 +131,13 @@ } }, "node_modules/@babel/generator": { - "version": "7.26.3", - "resolved": 
"https://registry.npmjs.org/@babel/generator/-/generator-7.26.3.tgz", - "integrity": "sha512-6FF/urZvD0sTeO7k6/B15pMLC4CHUv1426lzr3N01aHJTl046uCAh9LXW/fzeXXjPNCJ6iABW5XaWOsIZB93aQ==", + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.5.tgz", + "integrity": "sha512-2caSP6fN9I7HOe6nqhtft7V4g7/V/gfDsC3Ag4W7kEzzvRGKqiv0pu0HogPiZ3KaVSoNDhUws6IJjDjpfmYIXw==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.3", - "@babel/types": "^7.26.3", + "@babel/parser": "^7.26.5", + "@babel/types": "^7.26.5", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^3.0.2" @@ -145,13 +147,13 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.9.tgz", - "integrity": "sha512-j9Db8Suy6yV/VHa4qzrj9yZfZxhLWQdVnRlXxmKLYlhWUVB1sB2G5sxuWYXk/whHD9iW76PmNzxZ4UCnTQTVEQ==", + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.26.5.tgz", + "integrity": "sha512-IXuyn5EkouFJscIDuFF5EsiSolseme1s0CZB+QxVugqJLYmKdxI1VfIBOst0SUu4rnk2Z7kqTwmoO1lp3HIfnA==", "dev": true, "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.25.9", + "@babel/compat-data": "^7.26.5", "@babel/helper-validator-option": "^7.25.9", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", @@ -203,9 +205,9 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.25.9.tgz", - "integrity": "sha512-kSMlyUVdWe25rEsRGviIgOWnoT/nfABVWlqt9N19/dIPWViAOW2s9wznP5tURbs/IDuNk4gPy3YdYRgH3uxhBw==", + "version": "7.26.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz", + "integrity": 
"sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==", "dev": true, "license": "MIT", "engines": { @@ -241,26 +243,26 @@ } }, "node_modules/@babel/helpers": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.0.tgz", - "integrity": "sha512-tbhNuIxNcVb21pInl3ZSjksLCvgdZy9KwJ8brv993QtIVKJBBkYXz4q4ZbAv31GdnC+R90np23L5FbEBlthAEw==", + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.26.7.tgz", + "integrity": "sha512-8NHiL98vsi0mbPQmYAGWwfcFaOy4j2HY49fXJCfuDcdE7fMIsH9a7GdaeXpIBsbT7307WU8KCMp5pUVDNL4f9A==", "dev": true, "license": "MIT", "dependencies": { "@babel/template": "^7.25.9", - "@babel/types": "^7.26.0" + "@babel/types": "^7.26.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.3.tgz", - "integrity": "sha512-WJ/CvmY8Mea8iDXo6a7RK2wbmJITT5fN3BEkRuFlxVyNx8jOKIIhmC4fSkTcPcf8JyavbBwIe6OpiCOBXt/IcA==", + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.7.tgz", + "integrity": "sha512-kEvgGGgEjRUutvdVvZhbn/BxVt+5VSpwXz1j3WYXQbXDo8KzFOPNG2GQbdAiNq8g6wn1yKk7C/qrke03a84V+w==", "license": "MIT", "dependencies": { - "@babel/types": "^7.26.3" + "@babel/types": "^7.26.7" }, "bin": { "parser": "bin/babel-parser.js" @@ -302,9 +304,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.0.tgz", - "integrity": "sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw==", + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.7.tgz", + "integrity": "sha512-AOPI3D+a8dXnja+iwsUqGRjr1BbZIe771sXdapOtYI531gSqpi92vXivKcq2asu/DFpdl1ceFAKZyRzK2PCVcQ==", "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" @@ -328,16 +330,16 @@ } }, 
"node_modules/@babel/traverse": { - "version": "7.26.4", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.4.tgz", - "integrity": "sha512-fH+b7Y4p3yqvApJALCPJcwb0/XaOSgtK4pzV6WVjPR5GLFQBRI7pfoX2V2iM48NXvX07NUxxm1Vw98YjqTcU5w==", + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.26.7.tgz", + "integrity": "sha512-1x1sgeyRLC3r5fQOM0/xtQKsYjyxmFjaOrLJNtZ81inNjyJHGIolTULPiSc/2qe1/qfpFLisLQYFnnZl7QoedA==", "license": "MIT", "dependencies": { "@babel/code-frame": "^7.26.2", - "@babel/generator": "^7.26.3", - "@babel/parser": "^7.26.3", + "@babel/generator": "^7.26.5", + "@babel/parser": "^7.26.7", "@babel/template": "^7.25.9", - "@babel/types": "^7.26.3", + "@babel/types": "^7.26.7", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -355,9 +357,9 @@ } }, "node_modules/@babel/types": { - "version": "7.26.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.3.tgz", - "integrity": "sha512-vN5p+1kl59GVKMvTHt55NzzmYVxprfJD+ql7U9NFIfKCBkYE55LYtS+WtPlaYOyzydrKI8Nezd+aZextrd+FMA==", + "version": "7.26.7", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.7.tgz", + "integrity": "sha512-t8kDRGrKXyp6+tjUh7hw2RLyclsW4TRoRvRHtSyAX9Bb5ldlFh+90YAYY6awRXrlB4G5G2izNeGySpATlFzmOg==", "license": "MIT", "dependencies": { "@babel/helper-string-parser": "^7.25.9", @@ -514,9 +516,9 @@ "license": "MIT" }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", - "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.24.2.tgz", + "integrity": "sha512-thpVCb/rhxE/BnMLQ7GReQLLN8q9qbHmI55F4489/ByVg2aQaQ6kbcLb6FHkocZzQhxc4gx0sCk0tJkKBFzDhA==", "cpu": [ "ppc64" ], @@ -527,13 +529,13 @@ "aix" ], "engines": { - "node": ">=12" + "node": ">=18" } }, 
"node_modules/@esbuild/android-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", - "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.24.2.tgz", + "integrity": "sha512-tmwl4hJkCfNHwFB3nBa8z1Uy3ypZpxqxfTQOcHX+xRByyYgunVbZ9MzUUfb0RxaHIMnbHagwAxuTL+tnNM+1/Q==", "cpu": [ "arm" ], @@ -544,13 +546,13 @@ "android" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/android-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", - "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.24.2.tgz", + "integrity": "sha512-cNLgeqCqV8WxfcTIOeL4OAtSmL8JjcN6m09XIgro1Wi7cF4t/THaWEa7eL5CMoMBdjoHOTh/vwTO/o2TRXIyzg==", "cpu": [ "arm64" ], @@ -561,13 +563,13 @@ "android" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/android-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", - "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.24.2.tgz", + "integrity": "sha512-B6Q0YQDqMx9D7rvIcsXfmJfvUYLoP722bgfBlO5cGvNVb5V/+Y7nhBE3mHV9OpxBf4eAS2S68KZztiPaWq4XYw==", "cpu": [ "x64" ], @@ -578,13 +580,13 @@ "android" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", - "integrity": 
"sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.24.2.tgz", + "integrity": "sha512-kj3AnYWc+CekmZnS5IPu9D+HWtUI49hbnyqk0FLEJDbzCIQt7hg7ucF1SQAilhtYpIujfaHr6O0UHlzzSPdOeA==", "cpu": [ "arm64" ], @@ -595,13 +597,13 @@ "darwin" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", - "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.24.2.tgz", + "integrity": "sha512-WeSrmwwHaPkNR5H3yYfowhZcbriGqooyu3zI/3GGpF8AyUdsrrP0X6KumITGA9WOyiJavnGZUwPGvxvwfWPHIA==", "cpu": [ "x64" ], @@ -612,13 +614,13 @@ "darwin" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", - "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.24.2.tgz", + "integrity": "sha512-UN8HXjtJ0k/Mj6a9+5u6+2eZ2ERD7Edt1Q9IZiB5UZAIdPnVKDoG7mdTVGhHJIeEml60JteamR3qhsr1r8gXvg==", "cpu": [ "arm64" ], @@ -629,13 +631,13 @@ "freebsd" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", - "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "version": "0.24.2", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.24.2.tgz", + "integrity": "sha512-TvW7wE/89PYW+IevEJXZ5sF6gJRDY/14hyIGFXdIucxCsbRmLUcjseQu1SyTko+2idmCw94TgyaEZi9HUSOe3Q==", "cpu": [ "x64" ], @@ -646,13 +648,13 @@ "freebsd" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-arm": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", - "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.24.2.tgz", + "integrity": "sha512-n0WRM/gWIdU29J57hJyUdIsk0WarGd6To0s+Y+LwvlC55wt+GT/OgkwoXCXvIue1i1sSNWblHEig00GBWiJgfA==", "cpu": [ "arm" ], @@ -663,13 +665,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", - "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.24.2.tgz", + "integrity": "sha512-7HnAD6074BW43YvvUmE/35Id9/NB7BeX5EoNkK9obndmZBUk8xmJJeU7DwmUeN7tkysslb2eSl6CTrYz6oEMQg==", "cpu": [ "arm64" ], @@ -680,13 +682,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", - "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.24.2.tgz", + "integrity": "sha512-sfv0tGPQhcZOgTKO3oBE9xpHuUqguHvSo4jl+wjnKwFpapx+vUDcawbwPNuBIAYdRAvIDBfZVvXprIj3HA+Ugw==", "cpu": [ "ia32" ], @@ -697,13 
+699,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", - "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.24.2.tgz", + "integrity": "sha512-CN9AZr8kEndGooS35ntToZLTQLHEjtVB5n7dl8ZcTZMonJ7CCfStrYhrzF97eAecqVbVJ7APOEe18RPI4KLhwQ==", "cpu": [ "loong64" ], @@ -714,13 +716,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", - "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.24.2.tgz", + "integrity": "sha512-iMkk7qr/wl3exJATwkISxI7kTcmHKE+BlymIAbHO8xanq/TjHaaVThFF6ipWzPHryoFsesNQJPE/3wFJw4+huw==", "cpu": [ "mips64el" ], @@ -731,13 +733,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", - "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.24.2.tgz", + "integrity": "sha512-shsVrgCZ57Vr2L8mm39kO5PPIb+843FStGt7sGGoqiiWYconSxwTiuswC1VJZLCjNiMLAMh34jg4VSEQb+iEbw==", "cpu": [ "ppc64" ], @@ -748,13 +750,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.21.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", - "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.24.2.tgz", + "integrity": "sha512-4eSFWnU9Hhd68fW16GD0TINewo1L6dRrB+oLNNbYyMUAeOD2yCK5KXGK1GH4qD/kT+bTEXjsyTCiJGHPZ3eM9Q==", "cpu": [ "riscv64" ], @@ -765,13 +767,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", - "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.24.2.tgz", + "integrity": "sha512-S0Bh0A53b0YHL2XEXC20bHLuGMOhFDO6GN4b3YjRLK//Ep3ql3erpNcPlEFed93hsQAjAQDNsvcK+hV90FubSw==", "cpu": [ "s390x" ], @@ -782,13 +784,13 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/linux-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", - "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.24.2.tgz", + "integrity": "sha512-8Qi4nQcCTbLnK9WoMjdC9NiTG6/E38RNICU6sUNqK0QFxCYgoARqVqxdFmWkdonVsvGqWhmm7MO0jyTqLqwj0Q==", "cpu": [ "x64" ], @@ -799,13 +801,30 @@ "linux" ], "engines": { - "node": ">=12" + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.24.2.tgz", + "integrity": "sha512-wuLK/VztRRpMt9zyHSazyCVdCXlpHkKm34WUyinD2lzK07FAHTq0KQvZZlXikNWkDGoT6x3TD51jKQ7gMVpopw==", + "cpu": 
[ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", - "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.24.2.tgz", + "integrity": "sha512-VefFaQUc4FMmJuAxmIHgUmfNiLXY438XrL4GDNV1Y1H/RW3qow68xTwjZKfj/+Plp9NANmzbH5R40Meudu8mmw==", "cpu": [ "x64" ], @@ -816,13 +835,30 @@ "netbsd" ], "engines": { - "node": ">=12" + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.24.2.tgz", + "integrity": "sha512-YQbi46SBct6iKnszhSvdluqDmxCJA+Pu280Av9WICNwQmMxV7nLRHZfjQzwbPs3jeWnuAhE9Jy0NrnJ12Oz+0A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", - "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.24.2.tgz", + "integrity": "sha512-+iDS6zpNM6EnJyWv0bMGLWSWeXGN/HTaF/LXHXHwejGsVi+ooqDfMCCTerNFxEkM3wYVcExkeGXNqshc9iMaOA==", "cpu": [ "x64" ], @@ -833,13 +869,13 @@ "openbsd" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", - "integrity": 
"sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.24.2.tgz", + "integrity": "sha512-hTdsW27jcktEvpwNHJU4ZwWFGkz2zRJUz8pvddmXPtXDzVKTTINmlmga3ZzwcuMpUvLw7JkLy9QLKyGpD2Yxig==", "cpu": [ "x64" ], @@ -850,13 +886,13 @@ "sunos" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", - "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.24.2.tgz", + "integrity": "sha512-LihEQ2BBKVFLOC9ZItT9iFprsE9tqjDjnbulhHoFxYQtQfai7qfluVODIYxt1PgdoyQkz23+01rzwNwYfutxUQ==", "cpu": [ "arm64" ], @@ -867,13 +903,13 @@ "win32" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", - "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.24.2.tgz", + "integrity": "sha512-q+iGUwfs8tncmFC9pcnD5IvRHAzmbwQ3GPS5/ceCyHdjXubwQWI12MKWSNSMYLJMq23/IUCvJMS76PDqXe1fxA==", "cpu": [ "ia32" ], @@ -884,13 +920,13 @@ "win32" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@esbuild/win32-x64": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", - "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.24.2.tgz", + "integrity": 
"sha512-7VTgWzgMGvup6aSqDPLiW5zHaxYJGTO4OokMjIlrCtf+VpEL+cXKtCvg723iguPYI5oaUNdS+/V7OU2gvXVWEg==", "cpu": [ "x64" ], @@ -901,7 +937,7 @@ "win32" ], "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/@eslint-community/eslint-utils": { @@ -934,13 +970,13 @@ } }, "node_modules/@eslint/config-array": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.1.tgz", - "integrity": "sha512-fo6Mtm5mWyKjA/Chy1BYTdn5mGJoDNjC7C64ug20ADsRDGrA85bN3uK3MaKbeRkRuuIEAR5N33Jr1pbm411/PA==", + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.2.tgz", + "integrity": "sha512-GNKqxfHG2ySmJOBSHg7LxeUx4xpuCoFjacmlCoYWEbaPXLwvfIjixRI12xCQZeULksQb23uiA8F40w5TojpV7w==", "dev": true, "license": "Apache-2.0", "dependencies": { - "@eslint/object-schema": "^2.1.5", + "@eslint/object-schema": "^2.1.6", "debug": "^4.3.1", "minimatch": "^3.1.2" }, @@ -973,9 +1009,9 @@ } }, "node_modules/@eslint/core": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.9.1.tgz", - "integrity": "sha512-GuUdqkyyzQI5RMIWkHhvTWLCyLo1jNK3vzkSyaExH5kHPDHcuL2VOpHjmMY+y3+NC69qAKToBqldTBgYeLSr9Q==", + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.10.0.tgz", + "integrity": "sha512-gFHJ+xBOo4G3WRlR1e/3G8A6/KZAH6zcE/hkLRCZTi/B9avAG365QhFA8uOGzTMqgTghpn7/fSnscW++dpMSAw==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -1047,9 +1083,9 @@ } }, "node_modules/@eslint/js": { - "version": "9.17.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.17.0.tgz", - "integrity": "sha512-Sxc4hqcs1kTu0iID3kcZDW3JHq2a77HO9P8CP6YEA/FpH3Ll8UXE2r/86Rz9YJLKme39S9vU5OWNjC6Xl0Cr3w==", + "version": "9.19.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.19.0.tgz", + "integrity": "sha512-rbq9/g38qjfqFLOVPvwjIvFFdNziEC5S65jmjPw5r6A//QH+W91akh9irMwjDN8zKUTak6W9EsAv4m/7Wnw0UQ==", "dev": true, "license": "MIT", "engines": { @@ 
-1057,9 +1093,9 @@ } }, "node_modules/@eslint/object-schema": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.5.tgz", - "integrity": "sha512-o0bhxnL89h5Bae5T318nFoFzGy+YE5i/gGkoPAgkmTVdRKTiv3p8JHevPiPaMwoloKfEiiaHlawCqaZMqRm+XQ==", + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1067,12 +1103,13 @@ } }, "node_modules/@eslint/plugin-kit": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.4.tgz", - "integrity": "sha512-zSkKow6H5Kdm0ZUQUB2kV5JIXqoG0+uH5YADhaEHswm664N9Db8dXSi0nMJpacpMf+MyyglF1vnZohpEg5yUtg==", + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.5.tgz", + "integrity": "sha512-lB05FkqEdUg2AA0xEbUz0SnkXT1LcCTa438W4IWTUh4hdOnVbQyOJ81OrDXsJk/LSiJHubgGEFoR5EHq1NsH1A==", "dev": true, "license": "Apache-2.0", "dependencies": { + "@eslint/core": "^0.10.0", "levn": "^0.4.1" }, "engines": { @@ -1200,9 +1237,9 @@ } }, "node_modules/@mui/core-downloads-tracker": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-6.3.1.tgz", - "integrity": "sha512-2OmnEyoHpj5//dJJpMuxOeLItCCHdf99pjMFfUFdBteCunAK9jW+PwEo4mtdGcLs7P+IgZ+85ypd52eY4AigoQ==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-6.4.2.tgz", + "integrity": "sha512-Qmod9fHsFWrtLxdSkZ4iDLRz2AUKt3C2ZEimuY+qKlQGVKJDNS5DuSlNOAgqfHFDq8mzB17ATN6HFcThwJlvUw==", "license": "MIT", "funding": { "type": "opencollective", @@ -1210,9 +1247,9 @@ } }, "node_modules/@mui/icons-material": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-6.3.1.tgz", - 
"integrity": "sha512-nJmWj1PBlwS3t1PnoqcixIsftE+7xrW3Su7f0yrjPw4tVjYrgkhU0hrRp+OlURfZ3ptdSkoBkalee9Bhf1Erfw==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-6.4.2.tgz", + "integrity": "sha512-uwsH1KRmxkJwK3NZyo1xL9pEduL16ftCnzYBYjd6nPNtm05QAoIc0aqedS9tqDV+Ab3q5C04HHOVsMDDv1EBpg==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0" @@ -1225,7 +1262,7 @@ "url": "https://opencollective.com/mui-org" }, "peerDependencies": { - "@mui/material": "^6.3.1", + "@mui/material": "^6.4.2", "@types/react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react": "^17.0.0 || ^18.0.0 || ^19.0.0" }, @@ -1236,16 +1273,16 @@ } }, "node_modules/@mui/material": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@mui/material/-/material-6.3.1.tgz", - "integrity": "sha512-ynG9ayhxgCsHJ/dtDcT1v78/r2GwQyP3E0hPz3GdPRl0uFJz/uUTtI5KFYwadXmbC+Uv3bfB8laZ6+Cpzh03gA==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/material/-/material-6.4.2.tgz", + "integrity": "sha512-9jKr53KbAJyyBRx8LRmX7ATXHlGtxVQdPgm1uyXMoEPMVkSJW1yO3vFgfYoDbGx4ZHcCNuWa4FkFIPWVt9fghA==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", - "@mui/core-downloads-tracker": "^6.3.1", - "@mui/system": "^6.3.1", + "@mui/core-downloads-tracker": "^6.4.2", + "@mui/system": "^6.4.2", "@mui/types": "^7.2.21", - "@mui/utils": "^6.3.1", + "@mui/utils": "^6.4.2", "@popperjs/core": "^2.11.8", "@types/react-transition-group": "^4.4.12", "clsx": "^2.1.1", @@ -1264,7 +1301,7 @@ "peerDependencies": { "@emotion/react": "^11.5.0", "@emotion/styled": "^11.3.0", - "@mui/material-pigment-css": "^6.3.1", + "@mui/material-pigment-css": "^6.4.2", "@types/react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" @@ -1285,13 +1322,13 @@ } }, "node_modules/@mui/private-theming": { - "version": "6.3.1", - "resolved": 
"https://registry.npmjs.org/@mui/private-theming/-/private-theming-6.3.1.tgz", - "integrity": "sha512-g0u7hIUkmXmmrmmf5gdDYv9zdAig0KoxhIQn1JN8IVqApzf/AyRhH3uDGx5mSvs8+a1zb4+0W6LC260SyTTtdQ==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-6.4.2.tgz", + "integrity": "sha512-2CkQT0gNlogM50qGTBJgWA7hPPx4AeH8RE2xJa+PHtIOowiVPX52ZsQ0e7Ho18DAqEbkngQ6Uju037ER+TCY5A==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", - "@mui/utils": "^6.3.1", + "@mui/utils": "^6.4.2", "prop-types": "^15.8.1" }, "engines": { @@ -1312,9 +1349,9 @@ } }, "node_modules/@mui/styled-engine": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-6.3.1.tgz", - "integrity": "sha512-/7CC0d2fIeiUxN5kCCwYu4AWUDd9cCTxWCyo0v/Rnv6s8uk6hWgJC3VLZBoDENBHf/KjqDZuYJ2CR+7hD6QYww==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-6.4.2.tgz", + "integrity": "sha512-cgjQK2bkllSYoWUBv93ALhCPJ0NhfO3NctsBf13/b4XSeQVfKPBAnR+P9mNpdFMa5a5RWwtWuBD3cZ5vktsN+g==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", @@ -1346,16 +1383,16 @@ } }, "node_modules/@mui/system": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@mui/system/-/system-6.3.1.tgz", - "integrity": "sha512-AwqQ3EAIT2np85ki+N15fF0lFXX1iFPqenCzVOSl3QXKy2eifZeGd9dGtt7pGMoFw5dzW4dRGGzRpLAq9rkl7A==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/system/-/system-6.4.2.tgz", + "integrity": "sha512-wQbaPCtsxNsM5nR+NZpkFJBKVKH03GQnAjlkKENM8JQqGdWcRyM3f4fJZgzzNdIFpSQw4wpAQKnhfHkjf3d6yQ==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", - "@mui/private-theming": "^6.3.1", - "@mui/styled-engine": "^6.3.1", + "@mui/private-theming": "^6.4.2", + "@mui/styled-engine": "^6.4.2", "@mui/types": "^7.2.21", - "@mui/utils": "^6.3.1", + "@mui/utils": "^6.4.2", "clsx": "^2.1.1", "csstype": "^3.1.3", "prop-types": "^15.8.1" @@ -1400,9 
+1437,9 @@ } }, "node_modules/@mui/utils": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-6.3.1.tgz", - "integrity": "sha512-sjGjXAngoio6lniQZKJ5zGfjm+LD2wvLwco7FbKe1fu8A7VIFmz2SwkLb+MDPLNX1lE7IscvNNyh1pobtZg2tw==", + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-6.4.2.tgz", + "integrity": "sha512-5NkhzlJkmR5+5RSs/Irqin1GPy2Z8vbLk/UzQrH9FEAnm6OA9SvuXjzgklxUs7N65VwEkGpKK1jMZ5K84hRdzQ==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", @@ -1430,15 +1467,15 @@ } }, "node_modules/@mui/x-charts": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@mui/x-charts/-/x-charts-7.23.6.tgz", - "integrity": "sha512-25kH01gwE5I0d8kWYZEpXZ9zHVHKSAVDiGwhteqagHtO6x/dEfqbMnjGuaNruCWppJR6wIylLKi/tuiOSpIy2A==", + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@mui/x-charts/-/x-charts-7.25.0.tgz", + "integrity": "sha512-+DhnojHrVTt8RsTgq8AztzdFpW1kzOgiBdo0Pkl0DyxVdaKELC5QaetFwim9nIxT2zmU/RmiBcOU+qbqmQpFNA==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.25.7", "@mui/utils": "^5.16.6 || ^6.0.0", "@mui/x-charts-vendor": "7.20.0", - "@mui/x-internals": "7.23.6", + "@mui/x-internals": "7.25.0", "@react-spring/rafz": "^9.7.5", "@react-spring/web": "^9.7.5", "clsx": "^2.1.1", @@ -1488,9 +1525,9 @@ } }, "node_modules/@mui/x-internals": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@mui/x-internals/-/x-internals-7.23.6.tgz", - "integrity": "sha512-hT1Pa4PNCnxwiauPbYMC3p4DiEF1x05Iu4C1MtC/jMJ1LtthymLmTuQ6ZQ53/R9FeqK6sYd6A6noR+vNMjp5DA==", + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@mui/x-internals/-/x-internals-7.25.0.tgz", + "integrity": "sha512-tBUN54YznAkmtCIRAOl35Kgl0MjFDIjUbzIrbWRgVSIR3QJ8bXnVSkiRBi+P91SZEl9+ZW0rDj+osq7xFJV0kg==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.25.7", @@ -1546,9 +1583,9 @@ } }, "node_modules/@parcel/watcher": { - "version": "2.5.0", - "resolved": 
"https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.0.tgz", - "integrity": "sha512-i0GV1yJnm2n3Yq1qw6QrUrd/LI9bE8WEBOTtOkpCXHHdyN3TAGgqAK/DAT05z4fq2x04cARXt2pDmjWjL92iTQ==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher/-/watcher-2.5.1.tgz", + "integrity": "sha512-dfUnCxiN9H4ap84DvD2ubjw+3vUNpstxa0TneY/Paat8a3R4uQZDLSvWjmznAY/DoahqTHl9V46HF/Zs3F29pg==", "hasInstallScript": true, "license": "MIT", "optional": true, @@ -1566,25 +1603,25 @@ "url": "https://opencollective.com/parcel" }, "optionalDependencies": { - "@parcel/watcher-android-arm64": "2.5.0", - "@parcel/watcher-darwin-arm64": "2.5.0", - "@parcel/watcher-darwin-x64": "2.5.0", - "@parcel/watcher-freebsd-x64": "2.5.0", - "@parcel/watcher-linux-arm-glibc": "2.5.0", - "@parcel/watcher-linux-arm-musl": "2.5.0", - "@parcel/watcher-linux-arm64-glibc": "2.5.0", - "@parcel/watcher-linux-arm64-musl": "2.5.0", - "@parcel/watcher-linux-x64-glibc": "2.5.0", - "@parcel/watcher-linux-x64-musl": "2.5.0", - "@parcel/watcher-win32-arm64": "2.5.0", - "@parcel/watcher-win32-ia32": "2.5.0", - "@parcel/watcher-win32-x64": "2.5.0" + "@parcel/watcher-android-arm64": "2.5.1", + "@parcel/watcher-darwin-arm64": "2.5.1", + "@parcel/watcher-darwin-x64": "2.5.1", + "@parcel/watcher-freebsd-x64": "2.5.1", + "@parcel/watcher-linux-arm-glibc": "2.5.1", + "@parcel/watcher-linux-arm-musl": "2.5.1", + "@parcel/watcher-linux-arm64-glibc": "2.5.1", + "@parcel/watcher-linux-arm64-musl": "2.5.1", + "@parcel/watcher-linux-x64-glibc": "2.5.1", + "@parcel/watcher-linux-x64-musl": "2.5.1", + "@parcel/watcher-win32-arm64": "2.5.1", + "@parcel/watcher-win32-ia32": "2.5.1", + "@parcel/watcher-win32-x64": "2.5.1" } }, "node_modules/@parcel/watcher-android-arm64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.0.tgz", - "integrity": "sha512-qlX4eS28bUcQCdribHkg/herLe+0A9RyYC+mm2PXpncit8z5b3nSqGVzMNR3CmtAOgRutiZ02eIJJgP/b1iEFQ==", + 
"version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-android-arm64/-/watcher-android-arm64-2.5.1.tgz", + "integrity": "sha512-KF8+j9nNbUN8vzOFDpRMsaKBHZ/mcjEjMToVMJOhTozkDonQFFrRcfdLWn6yWKCmJKmdVxSgHiYvTCef4/qcBA==", "cpu": [ "arm64" ], @@ -1602,9 +1639,9 @@ } }, "node_modules/@parcel/watcher-darwin-arm64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.0.tgz", - "integrity": "sha512-hyZ3TANnzGfLpRA2s/4U1kbw2ZI4qGxaRJbBH2DCSREFfubMswheh8TeiC1sGZ3z2jUf3s37P0BBlrD3sjVTUw==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-arm64/-/watcher-darwin-arm64-2.5.1.tgz", + "integrity": "sha512-eAzPv5osDmZyBhou8PoF4i6RQXAfeKL9tjb3QzYuccXFMQU0ruIc/POh30ePnaOyD1UXdlKguHBmsTs53tVoPw==", "cpu": [ "arm64" ], @@ -1622,9 +1659,9 @@ } }, "node_modules/@parcel/watcher-darwin-x64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.0.tgz", - "integrity": "sha512-9rhlwd78saKf18fT869/poydQK8YqlU26TMiNg7AIu7eBp9adqbJZqmdFOsbZ5cnLp5XvRo9wcFmNHgHdWaGYA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-darwin-x64/-/watcher-darwin-x64-2.5.1.tgz", + "integrity": "sha512-1ZXDthrnNmwv10A0/3AJNZ9JGlzrF82i3gNQcWOzd7nJ8aj+ILyW1MTxVk35Db0u91oD5Nlk9MBiujMlwmeXZg==", "cpu": [ "x64" ], @@ -1642,9 +1679,9 @@ } }, "node_modules/@parcel/watcher-freebsd-x64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.0.tgz", - "integrity": "sha512-syvfhZzyM8kErg3VF0xpV8dixJ+RzbUaaGaeb7uDuz0D3FK97/mZ5AJQ3XNnDsXX7KkFNtyQyFrXZzQIcN49Tw==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-freebsd-x64/-/watcher-freebsd-x64-2.5.1.tgz", + "integrity": "sha512-SI4eljM7Flp9yPuKi8W0ird8TI/JK6CSxju3NojVI6BjHsTyK7zxA9urjVjEKJ5MBYC+bLmMcbAWlZ+rFkLpJQ==", "cpu": [ "x64" ], @@ -1662,9 +1699,9 @@ 
} }, "node_modules/@parcel/watcher-linux-arm-glibc": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.0.tgz", - "integrity": "sha512-0VQY1K35DQET3dVYWpOaPFecqOT9dbuCfzjxoQyif1Wc574t3kOSkKevULddcR9znz1TcklCE7Ht6NIxjvTqLA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-glibc/-/watcher-linux-arm-glibc-2.5.1.tgz", + "integrity": "sha512-RCdZlEyTs8geyBkkcnPWvtXLY44BCeZKmGYRtSgtwwnHR4dxfHRG3gR99XdMEdQ7KeiDdasJwwvNSF5jKtDwdA==", "cpu": [ "arm" ], @@ -1682,9 +1719,9 @@ } }, "node_modules/@parcel/watcher-linux-arm-musl": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.0.tgz", - "integrity": "sha512-6uHywSIzz8+vi2lAzFeltnYbdHsDm3iIB57d4g5oaB9vKwjb6N6dRIgZMujw4nm5r6v9/BQH0noq6DzHrqr2pA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm-musl/-/watcher-linux-arm-musl-2.5.1.tgz", + "integrity": "sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==", "cpu": [ "arm" ], @@ -1702,9 +1739,9 @@ } }, "node_modules/@parcel/watcher-linux-arm64-glibc": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.0.tgz", - "integrity": "sha512-BfNjXwZKxBy4WibDb/LDCriWSKLz+jJRL3cM/DllnHH5QUyoiUNEp3GmL80ZqxeumoADfCCP19+qiYiC8gUBjA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-glibc/-/watcher-linux-arm64-glibc-2.5.1.tgz", + "integrity": "sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==", "cpu": [ "arm64" ], @@ -1722,9 +1759,9 @@ } }, "node_modules/@parcel/watcher-linux-arm64-musl": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.0.tgz", - "integrity": 
"sha512-S1qARKOphxfiBEkwLUbHjCY9BWPdWnW9j7f7Hb2jPplu8UZ3nes7zpPOW9bkLbHRvWM0WDTsjdOTUgW0xLBN1Q==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-arm64-musl/-/watcher-linux-arm64-musl-2.5.1.tgz", + "integrity": "sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==", "cpu": [ "arm64" ], @@ -1742,9 +1779,9 @@ } }, "node_modules/@parcel/watcher-linux-x64-glibc": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.0.tgz", - "integrity": "sha512-d9AOkusyXARkFD66S6zlGXyzx5RvY+chTP9Jp0ypSTC9d4lzyRs9ovGf/80VCxjKddcUvnsGwCHWuF2EoPgWjw==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-glibc/-/watcher-linux-x64-glibc-2.5.1.tgz", + "integrity": "sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==", "cpu": [ "x64" ], @@ -1762,9 +1799,9 @@ } }, "node_modules/@parcel/watcher-linux-x64-musl": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.0.tgz", - "integrity": "sha512-iqOC+GoTDoFyk/VYSFHwjHhYrk8bljW6zOhPuhi5t9ulqiYq1togGJB5e3PwYVFFfeVgc6pbz3JdQyDoBszVaA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-linux-x64-musl/-/watcher-linux-x64-musl-2.5.1.tgz", + "integrity": "sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==", "cpu": [ "x64" ], @@ -1782,9 +1819,9 @@ } }, "node_modules/@parcel/watcher-win32-arm64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.0.tgz", - "integrity": "sha512-twtft1d+JRNkM5YbmexfcH/N4znDtjgysFaV9zvZmmJezQsKpkfLYJ+JFV3uygugK6AtIM2oADPkB2AdhBrNig==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-arm64/-/watcher-win32-arm64-2.5.1.tgz", + "integrity": 
"sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==", "cpu": [ "arm64" ], @@ -1802,9 +1839,9 @@ } }, "node_modules/@parcel/watcher-win32-ia32": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.0.tgz", - "integrity": "sha512-+rgpsNRKwo8A53elqbbHXdOMtY/tAtTzManTWShB5Kk54N8Q9mzNWV7tV+IbGueCbcj826MfWGU3mprWtuf1TA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-ia32/-/watcher-win32-ia32-2.5.1.tgz", + "integrity": "sha512-c2KkcVN+NJmuA7CGlaGD1qJh1cLfDnQsHjE89E60vUEMlqduHGCdCLJCID5geFVM0dOtA3ZiIO8BoEQmzQVfpQ==", "cpu": [ "ia32" ], @@ -1822,9 +1859,9 @@ } }, "node_modules/@parcel/watcher-win32-x64": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.0.tgz", - "integrity": "sha512-lPrxve92zEHdgeff3aiu4gDOIt4u7sJYha6wbdEZDCDUhtjTsOMiaJzG5lMY4GkWH8p0fMmO2Ppq5G5XXG+DQw==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/@parcel/watcher-win32-x64/-/watcher-win32-x64-2.5.1.tgz", + "integrity": "sha512-9lHBdJITeNR++EvSQVUcaZoWupyHfXe1jZvGZ06O/5MflPcuPLtEphScIBL+AiCWBO46tDSHzWyD0uDmmZqsgA==", "cpu": [ "x64" ], @@ -1923,19 +1960,10 @@ "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, - "node_modules/@remix-run/router": { - "version": "1.21.0", - "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.21.0.tgz", - "integrity": "sha512-xfSkCAchbdG5PnbrKqFWwia4Bi61nH+wm8wLEqfHDyp7Y3dZzgqS2itV8i4gAq9pC2HsTpwyBC6Ds8VHZ96JlA==", - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.30.1.tgz", - "integrity": "sha512-pSWY+EVt3rJ9fQ3IqlrEUtXh3cGqGtPDH1FQlNZehO2yYxCHEX1SPsz1M//NXwYfbTlcKr9WObLnJX9FsS9K1Q==", + "version": "4.34.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.34.0.tgz", + "integrity": "sha512-Eeao7ewDq79jVEsrtWIj5RNqB8p2knlm9fhR6uJ2gqP7UfbLrTrxevudVrEPDM7Wkpn/HpRC2QfazH7MXLz3vQ==", "cpu": [ "arm" ], @@ -1947,9 +1975,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.30.1.tgz", - "integrity": "sha512-/NA2qXxE3D/BRjOJM8wQblmArQq1YoBVJjrjoTSBS09jgUisq7bqxNHJ8kjCHeV21W/9WDGwJEWSN0KQ2mtD/w==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.34.0.tgz", + "integrity": "sha512-yVh0Kf1f0Fq4tWNf6mWcbQBCLDpDrDEl88lzPgKhrgTcDrTtlmun92ywEF9dCjmYO3EFiSuJeeo9cYRxl2FswA==", "cpu": [ "arm64" ], @@ -1961,9 +1989,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.30.1.tgz", - "integrity": "sha512-r7FQIXD7gB0WJ5mokTUgUWPl0eYIH0wnxqeSAhuIwvnnpjdVB8cRRClyKLQr7lgzjctkbp5KmswWszlwYln03Q==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.34.0.tgz", + "integrity": "sha512-gCs0ErAZ9s0Osejpc3qahTsqIPUDjSKIyxK/0BGKvL+Tn0n3Kwvj8BrCv7Y5sR1Ypz1K2qz9Ny0VvkVyoXBVUQ==", "cpu": [ "arm64" ], @@ -1975,9 +2003,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.30.1.tgz", - "integrity": "sha512-x78BavIwSH6sqfP2xeI1hd1GpHL8J4W2BXcVM/5KYKoAD3nNsfitQhvWSw+TFtQTLZ9OmlF+FEInEHyubut2OA==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.34.0.tgz", + "integrity": "sha512-aIB5Anc8hngk15t3GUkiO4pv42ykXHfmpXGS+CzM9CTyiWyT8HIS5ygRAy7KcFb/wiw4Br+vh1byqcHRTfq2tQ==", "cpu": [ "x64" ], @@ -1989,9 +2017,9 @@ ] }, 
"node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.30.1.tgz", - "integrity": "sha512-HYTlUAjbO1z8ywxsDFWADfTRfTIIy/oUlfIDmlHYmjUP2QRDTzBuWXc9O4CXM+bo9qfiCclmHk1x4ogBjOUpUQ==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.34.0.tgz", + "integrity": "sha512-kpdsUdMlVJMRMaOf/tIvxk8TQdzHhY47imwmASOuMajg/GXpw8GKNd8LNwIHE5Yd1onehNpcUB9jHY6wgw9nHQ==", "cpu": [ "arm64" ], @@ -2003,9 +2031,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.30.1.tgz", - "integrity": "sha512-1MEdGqogQLccphhX5myCJqeGNYTNcmTyaic9S7CG3JhwuIByJ7J05vGbZxsizQthP1xpVx7kd3o31eOogfEirw==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.34.0.tgz", + "integrity": "sha512-D0RDyHygOBCQiqookcPevrvgEarN0CttBecG4chOeIYCNtlKHmf5oi5kAVpXV7qs0Xh/WO2RnxeicZPtT50V0g==", "cpu": [ "x64" ], @@ -2017,9 +2045,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.30.1.tgz", - "integrity": "sha512-PaMRNBSqCx7K3Wc9QZkFx5+CX27WFpAMxJNiYGAXfmMIKC7jstlr32UhTgK6T07OtqR+wYlWm9IxzennjnvdJg==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.34.0.tgz", + "integrity": "sha512-mCIw8j5LPDXmCOW8mfMZwT6F/Kza03EnSr4wGYEswrEfjTfVsFOxvgYfuRMxTuUF/XmRb9WSMD5GhCWDe2iNrg==", "cpu": [ "arm" ], @@ -2031,9 +2059,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.30.1.tgz", - "integrity": 
"sha512-B8Rcyj9AV7ZlEFqvB5BubG5iO6ANDsRKlhIxySXcF1axXYUyqwBok+XZPgIYGBgs7LDXfWfifxhw0Ik57T0Yug==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.34.0.tgz", + "integrity": "sha512-AwwldAu4aCJPob7zmjuDUMvvuatgs8B/QiVB0KwkUarAcPB3W+ToOT+18TQwY4z09Al7G0BvCcmLRop5zBLTag==", "cpu": [ "arm" ], @@ -2045,9 +2073,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.30.1.tgz", - "integrity": "sha512-hqVyueGxAj3cBKrAI4aFHLV+h0Lv5VgWZs9CUGqr1z0fZtlADVV1YPOij6AhcK5An33EXaxnDLmJdQikcn5NEw==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.34.0.tgz", + "integrity": "sha512-e7kDUGVP+xw05pV65ZKb0zulRploU3gTu6qH1qL58PrULDGxULIS0OSDQJLH7WiFnpd3ZKUU4VM3u/Z7Zw+e7Q==", "cpu": [ "arm64" ], @@ -2059,9 +2087,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.30.1.tgz", - "integrity": "sha512-i4Ab2vnvS1AE1PyOIGp2kXni69gU2DAUVt6FSXeIqUCPIR3ZlheMW3oP2JkukDfu3PsexYRbOiJrY+yVNSk9oA==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.34.0.tgz", + "integrity": "sha512-SXYJw3zpwHgaBqTXeAZ31qfW/v50wq4HhNVvKFhRr5MnptRX2Af4KebLWR1wpxGJtLgfS2hEPuALRIY3LPAAcA==", "cpu": [ "arm64" ], @@ -2073,9 +2101,9 @@ ] }, "node_modules/@rollup/rollup-linux-loongarch64-gnu": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.30.1.tgz", - "integrity": "sha512-fARcF5g296snX0oLGkVxPmysetwUk2zmHcca+e9ObOovBR++9ZPOhqFUM61UUZ2EYpXVPN1redgqVoBB34nTpQ==", + "version": "4.34.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.34.0.tgz", + "integrity": "sha512-e5XiCinINCI4RdyU3sFyBH4zzz7LiQRvHqDtRe9Dt8o/8hTBaYpdPimayF00eY2qy5j4PaaWK0azRgUench6WQ==", "cpu": [ "loong64" ], @@ -2087,9 +2115,9 @@ ] }, "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.30.1.tgz", - "integrity": "sha512-GLrZraoO3wVT4uFXh67ElpwQY0DIygxdv0BNW9Hkm3X34wu+BkqrDrkcsIapAY+N2ATEbvak0XQ9gxZtCIA5Rw==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.34.0.tgz", + "integrity": "sha512-3SWN3e0bAsm9ToprLFBSro8nJe6YN+5xmB11N4FfNf92wvLye/+Rh5JGQtKOpwLKt6e61R1RBc9g+luLJsc23A==", "cpu": [ "ppc64" ], @@ -2101,9 +2129,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.30.1.tgz", - "integrity": "sha512-0WKLaAUUHKBtll0wvOmh6yh3S0wSU9+yas923JIChfxOaaBarmb/lBKPF0w/+jTVozFnOXJeRGZ8NvOxvk/jcw==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.34.0.tgz", + "integrity": "sha512-B1Oqt3GLh7qmhvfnc2WQla4NuHlcxAD5LyueUi5WtMc76ZWY+6qDtQYqnxARx9r+7mDGfamD+8kTJO0pKUJeJA==", "cpu": [ "riscv64" ], @@ -2115,9 +2143,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.30.1.tgz", - "integrity": "sha512-GWFs97Ruxo5Bt+cvVTQkOJ6TIx0xJDD/bMAOXWJg8TCSTEK8RnFeOeiFTxKniTc4vMIaWvCplMAFBt9miGxgkA==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.34.0.tgz", + "integrity": 
"sha512-UfUCo0h/uj48Jq2lnhX0AOhZPSTAq3Eostas+XZ+GGk22pI+Op1Y6cxQ1JkUuKYu2iU+mXj1QjPrZm9nNWV9rg==", "cpu": [ "s390x" ], @@ -2129,9 +2157,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.30.1.tgz", - "integrity": "sha512-UtgGb7QGgXDIO+tqqJ5oZRGHsDLO8SlpE4MhqpY9Llpzi5rJMvrK6ZGhsRCST2abZdBqIBeXW6WPD5fGK5SDwg==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.34.0.tgz", + "integrity": "sha512-chZLTUIPbgcpm+Z7ALmomXW8Zh+wE2icrG+K6nt/HenPLmtwCajhQC5flNSk1Xy5EDMt/QAOz2MhzfOfJOLSiA==", "cpu": [ "x64" ], @@ -2143,9 +2171,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.30.1.tgz", - "integrity": "sha512-V9U8Ey2UqmQsBT+xTOeMzPzwDzyXmnAoO4edZhL7INkwQcaW1Ckv3WJX3qrrp/VHaDkEWIBWhRwP47r8cdrOow==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.34.0.tgz", + "integrity": "sha512-jo0UolK70O28BifvEsFD/8r25shFezl0aUk2t0VJzREWHkq19e+pcLu4kX5HiVXNz5qqkD+aAq04Ct8rkxgbyQ==", "cpu": [ "x64" ], @@ -2157,9 +2185,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.30.1.tgz", - "integrity": "sha512-WabtHWiPaFF47W3PkHnjbmWawnX/aE57K47ZDT1BXTS5GgrBUEpvOzq0FI0V/UYzQJgdb8XlhVNH8/fwV8xDjw==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.34.0.tgz", + "integrity": "sha512-Vmg0NhAap2S54JojJchiu5An54qa6t/oKT7LmDaWggpIcaiL8WcWHEN6OQrfTdL6mQ2GFyH7j2T5/3YPEDOOGA==", "cpu": [ "arm64" ], @@ -2171,9 +2199,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.30.1", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.30.1.tgz", - "integrity": "sha512-pxHAU+Zv39hLUTdQQHUVHf4P+0C47y/ZloorHpzs2SXMRqeAWmGghzAhfOlzFHHwjvgokdFAhC4V+6kC1lRRfw==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.34.0.tgz", + "integrity": "sha512-CV2aqhDDOsABKHKhNcs1SZFryffQf8vK2XrxP6lxC99ELZAdvsDgPklIBfd65R8R+qvOm1SmLaZ/Fdq961+m7A==", "cpu": [ "ia32" ], @@ -2185,9 +2213,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.30.1.tgz", - "integrity": "sha512-D6qjsXGcvhTjv0kI4fU8tUuBDF/Ueee4SVX79VfNDXZa64TfCW1Slkb6Z7O1p7vflqZjcmOVdZlqf8gvJxc6og==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.34.0.tgz", + "integrity": "sha512-g2ASy1QwHP88y5KWvblUolJz9rN+i4ZOsYzkEwcNfaNooxNUXG+ON6F5xFo0NIItpHqxcdAyls05VXpBnludGw==", "cpu": [ "x64" ], @@ -2243,6 +2271,12 @@ "@babel/types": "^7.20.7" } }, + "node_modules/@types/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==", + "license": "MIT" + }, "node_modules/@types/d3-color": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", @@ -2338,6 +2372,13 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/lodash": { + "version": "4.17.15", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.15.tgz", + "integrity": "sha512-w/P33JFeySuhN6JLkysYUK2gEmy9kHHFN7E8ro0tkfmlDOgxBDzWEZ/J8cWA+fHqFevpswDTFZnDx+R9lbL6xw==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/parse-json": { "version": "4.0.2", "resolved": 
"https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", @@ -2396,17 +2437,17 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.19.1.tgz", - "integrity": "sha512-tJzcVyvvb9h/PB96g30MpxACd9IrunT7GF9wfA9/0TJ1LxGOJx1TdPzSbBBnNED7K9Ka8ybJsnEpiXPktolTLg==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.22.0.tgz", + "integrity": "sha512-4Uta6REnz/xEJMvwf72wdUnC3rr4jAQf5jnTkeRQ9b6soxLxhDEbS/pfMPoJLDfFPNVRdryqWUIV/2GZzDJFZw==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.19.1", - "@typescript-eslint/type-utils": "8.19.1", - "@typescript-eslint/utils": "8.19.1", - "@typescript-eslint/visitor-keys": "8.19.1", + "@typescript-eslint/scope-manager": "8.22.0", + "@typescript-eslint/type-utils": "8.22.0", + "@typescript-eslint/utils": "8.22.0", + "@typescript-eslint/visitor-keys": "8.22.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2426,16 +2467,16 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.19.1.tgz", - "integrity": "sha512-67gbfv8rAwawjYx3fYArwldTQKoYfezNUT4D5ioWetr/xCrxXxvleo3uuiFuKfejipvq+og7mjz3b0G2bVyUCw==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.22.0.tgz", + "integrity": "sha512-MqtmbdNEdoNxTPzpWiWnqNac54h8JDAmkWtJExBVVnSrSmi9z+sZUt0LfKqk9rjqmKOIeRhO4fHHJ1nQIjduIQ==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/scope-manager": "8.19.1", - "@typescript-eslint/types": "8.19.1", - "@typescript-eslint/typescript-estree": "8.19.1", - "@typescript-eslint/visitor-keys": "8.19.1", + "@typescript-eslint/scope-manager": "8.22.0", + 
"@typescript-eslint/types": "8.22.0", + "@typescript-eslint/typescript-estree": "8.22.0", + "@typescript-eslint/visitor-keys": "8.22.0", "debug": "^4.3.4" }, "engines": { @@ -2451,14 +2492,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.19.1.tgz", - "integrity": "sha512-60L9KIuN/xgmsINzonOcMDSB8p82h95hoBfSBtXuO4jlR1R9L1xSkmVZKgCPVfavDlXihh4ARNjXhh1gGnLC7Q==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.22.0.tgz", + "integrity": "sha512-/lwVV0UYgkj7wPSw0o8URy6YI64QmcOdwHuGuxWIYznO6d45ER0wXUbksr9pYdViAofpUCNJx/tAzNukgvaaiQ==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.19.1", - "@typescript-eslint/visitor-keys": "8.19.1" + "@typescript-eslint/types": "8.22.0", + "@typescript-eslint/visitor-keys": "8.22.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2469,14 +2510,14 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.19.1.tgz", - "integrity": "sha512-Rp7k9lhDKBMRJB/nM9Ksp1zs4796wVNyihG9/TU9R6KCJDNkQbc2EOKjrBtLYh3396ZdpXLtr/MkaSEmNMtykw==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.22.0.tgz", + "integrity": "sha512-NzE3aB62fDEaGjaAYZE4LH7I1MUwHooQ98Byq0G0y3kkibPJQIXVUspzlFOmOfHhiDLwKzMlWxaNv+/qcZurJA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "8.19.1", - "@typescript-eslint/utils": "8.19.1", + "@typescript-eslint/typescript-estree": "8.22.0", + "@typescript-eslint/utils": "8.22.0", "debug": "^4.3.4", "ts-api-utils": "^2.0.0" }, @@ -2493,9 +2534,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.19.1", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/types/-/types-8.19.1.tgz", - "integrity": "sha512-JBVHMLj7B1K1v1051ZaMMgLW4Q/jre5qGK0Ew6UgXz1Rqh+/xPzV1aW581OM00X6iOfyr1be+QyW8LOUf19BbA==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.22.0.tgz", + "integrity": "sha512-0S4M4baNzp612zwpD4YOieP3VowOARgK2EkN/GBn95hpyF8E2fbMT55sRHWBq+Huaqk3b3XK+rxxlM8sPgGM6A==", "dev": true, "license": "MIT", "engines": { @@ -2507,14 +2548,14 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.19.1.tgz", - "integrity": "sha512-jk/TZwSMJlxlNnqhy0Eod1PNEvCkpY6MXOXE/WLlblZ6ibb32i2We4uByoKPv1d0OD2xebDv4hbs3fm11SMw8Q==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.22.0.tgz", + "integrity": "sha512-SJX99NAS2ugGOzpyhMza/tX+zDwjvwAtQFLsBo3GQxiGcvaKlqGBkmZ+Y1IdiSi9h4Q0Lr5ey+Cp9CGWNY/F/w==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.19.1", - "@typescript-eslint/visitor-keys": "8.19.1", + "@typescript-eslint/types": "8.22.0", + "@typescript-eslint/visitor-keys": "8.22.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -2534,16 +2575,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.19.1.tgz", - "integrity": "sha512-IxG5gLO0Ne+KaUc8iW1A+XuKLd63o4wlbI1Zp692n1xojCl/THvgIKXJXBZixTh5dd5+yTJ/VXH7GJaaw21qXA==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.22.0.tgz", + "integrity": "sha512-T8oc1MbF8L+Bk2msAvCUzjxVB2Z2f+vXYfcucE2wOmYs7ZUwco5Ep0fYZw8quNwOiw9K8GYVL+Kgc2pETNTLOg==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.19.1", - 
"@typescript-eslint/types": "8.19.1", - "@typescript-eslint/typescript-estree": "8.19.1" + "@typescript-eslint/scope-manager": "8.22.0", + "@typescript-eslint/types": "8.22.0", + "@typescript-eslint/typescript-estree": "8.22.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2558,13 +2599,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.19.1.tgz", - "integrity": "sha512-fzmjU8CHK853V/avYZAvuVut3ZTfwN5YtMaoi+X9Y9MA9keaWNHC3zEQ9zvyX/7Hj+5JkNyK1l7TOR2hevHB6Q==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.22.0.tgz", + "integrity": "sha512-AWpYAXnUgvLNabGTy3uBylkgZoosva/miNd1I8Bz3SjotmQPbVqhO4Cczo8AsZ44XVErEBPr/CRSgaj8sG7g0w==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.19.1", + "@typescript-eslint/types": "8.22.0", "eslint-visitor-keys": "^4.2.0" }, "engines": { @@ -2807,6 +2848,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/asynckit": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", @@ -2978,9 +3029,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001692", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001692.tgz", - "integrity": "sha512-A95VKan0kdtrsnMubMKxEKUKImOPSuCpYgxSQBo036P5YYgVIcOYJEgt/txJWqObiRQeISNCfef9nvlQ0vbV7A==", + "version": "1.0.30001696", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001696.tgz", + "integrity": 
"sha512-pDCPkvzfa39ehJtJ+OwGT/2yvT2SbjfHhiIW2LWOAcMQ7BzwxT/XuyUp4OTOd0XFWA6BKw0JalnBHgSi5DGJBQ==", "dev": true, "funding": [ { @@ -3124,6 +3175,15 @@ "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", "license": "MIT" }, + "node_modules/cookie": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz", + "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/cosmiconfig": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", @@ -3140,6 +3200,15 @@ "node": ">=10" } }, + "node_modules/cosmiconfig/node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -3460,9 +3529,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.5.80", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.80.tgz", - "integrity": "sha512-LTrKpW0AqIuHwmlVNV+cjFYTnXtM9K37OGhpe0ZI10ScPSxqVSryZHIY3WnCS5NSYbBODRTZyhRMS2h5FAEqAw==", + "version": "1.5.90", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.90.tgz", + "integrity": "sha512-C3PN4aydfW91Natdyd449Kw+BzhLmof6tzy5W1pFC5SpQxVXT+oyiyOG9AgYYSN9OdA/ik3YkCrpwqI8ug5Tug==", "dev": true, "license": "ISC" }, @@ -3590,9 +3659,9 @@ } }, "node_modules/es-object-atoms": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz", - "integrity": 
"sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", "dev": true, "license": "MIT", "dependencies": { @@ -3647,9 +3716,9 @@ } }, "node_modules/esbuild": { - "version": "0.21.5", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", - "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "version": "0.24.2", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.24.2.tgz", + "integrity": "sha512-+9egpBW8I3CD5XPe0n6BfT5fxLzxrlDzqydF3aviG+9ni1lDC/OvMHcxqEFV0+LANZG5R1bFMWfUrjVsdwxJvA==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -3657,32 +3726,34 @@ "esbuild": "bin/esbuild" }, "engines": { - "node": ">=12" + "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.21.5", - "@esbuild/android-arm": "0.21.5", - "@esbuild/android-arm64": "0.21.5", - "@esbuild/android-x64": "0.21.5", - "@esbuild/darwin-arm64": "0.21.5", - "@esbuild/darwin-x64": "0.21.5", - "@esbuild/freebsd-arm64": "0.21.5", - "@esbuild/freebsd-x64": "0.21.5", - "@esbuild/linux-arm": "0.21.5", - "@esbuild/linux-arm64": "0.21.5", - "@esbuild/linux-ia32": "0.21.5", - "@esbuild/linux-loong64": "0.21.5", - "@esbuild/linux-mips64el": "0.21.5", - "@esbuild/linux-ppc64": "0.21.5", - "@esbuild/linux-riscv64": "0.21.5", - "@esbuild/linux-s390x": "0.21.5", - "@esbuild/linux-x64": "0.21.5", - "@esbuild/netbsd-x64": "0.21.5", - "@esbuild/openbsd-x64": "0.21.5", - "@esbuild/sunos-x64": "0.21.5", - "@esbuild/win32-arm64": "0.21.5", - "@esbuild/win32-ia32": "0.21.5", - "@esbuild/win32-x64": "0.21.5" + "@esbuild/aix-ppc64": "0.24.2", + "@esbuild/android-arm": "0.24.2", + "@esbuild/android-arm64": "0.24.2", + "@esbuild/android-x64": "0.24.2", 
+ "@esbuild/darwin-arm64": "0.24.2", + "@esbuild/darwin-x64": "0.24.2", + "@esbuild/freebsd-arm64": "0.24.2", + "@esbuild/freebsd-x64": "0.24.2", + "@esbuild/linux-arm": "0.24.2", + "@esbuild/linux-arm64": "0.24.2", + "@esbuild/linux-ia32": "0.24.2", + "@esbuild/linux-loong64": "0.24.2", + "@esbuild/linux-mips64el": "0.24.2", + "@esbuild/linux-ppc64": "0.24.2", + "@esbuild/linux-riscv64": "0.24.2", + "@esbuild/linux-s390x": "0.24.2", + "@esbuild/linux-x64": "0.24.2", + "@esbuild/netbsd-arm64": "0.24.2", + "@esbuild/netbsd-x64": "0.24.2", + "@esbuild/openbsd-arm64": "0.24.2", + "@esbuild/openbsd-x64": "0.24.2", + "@esbuild/sunos-x64": "0.24.2", + "@esbuild/win32-arm64": "0.24.2", + "@esbuild/win32-ia32": "0.24.2", + "@esbuild/win32-x64": "0.24.2" } }, "node_modules/escalade": { @@ -3708,19 +3779,19 @@ } }, "node_modules/eslint": { - "version": "9.17.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.17.0.tgz", - "integrity": "sha512-evtlNcpJg+cZLcnVKwsai8fExnqjGPicK7gnUtlNuzu+Fv9bI0aLpND5T44VLQtoMEnI57LoXO9XAkIXwohKrA==", + "version": "9.19.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.19.0.tgz", + "integrity": "sha512-ug92j0LepKlbbEv6hD911THhoRHmbdXt2gX+VDABAW/Ir7D3nqKdv5Pf5vtlyY6HQMTEP2skXY43ueqTCWssEA==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.19.0", - "@eslint/core": "^0.9.0", + "@eslint/core": "^0.10.0", "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "9.17.0", - "@eslint/plugin-kit": "^0.2.3", + "@eslint/js": "9.19.0", + "@eslint/plugin-kit": "^0.2.5", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.1", @@ -3768,9 +3839,9 @@ } }, "node_modules/eslint-plugin-react": { - "version": "7.37.3", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.3.tgz", - "integrity": 
"sha512-DomWuTQPFYZwF/7c9W2fkKkStqZmBd3uugfqBYLdkZ3Hii23WzZuOLUskGxB8qkSKqftxEeGL1TB2kMhrce0jA==", + "version": "7.37.4", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.37.4.tgz", + "integrity": "sha512-BGP0jRmfYyvOyvMoRX/uoUeW+GqNj9y16bPQzqAHf3AYII/tDs+jMN0dBVkl88/OZwNGwrVFxE7riHsXVfy/LQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3814,9 +3885,9 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.16", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.16.tgz", - "integrity": "sha512-slterMlxAhov/DZO8NScf6mEeMBBXodFUolijDvrtTxyezyLoTQaa73FyYus/VbTdftd8wBgBxPMRk3poleXNQ==", + "version": "0.4.18", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.18.tgz", + "integrity": "sha512-IRGEoFn3OKalm3hjfolEWGqoF/jPqeEYFp+C8B0WMzwGwBMvlRDQd06kghDhF0C61uJ6WfSDhEZE/sAQjduKgw==", "dev": true, "license": "MIT", "peerDependencies": { @@ -4071,9 +4142,9 @@ "license": "MIT" }, "node_modules/fastq": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.18.0.tgz", - "integrity": "sha512-QKHXPW0hD8g4UET03SdOdunzSouc9N4AuHdsX8XNcTsuz+yYFILVNIX4l9yHABMhiEI9Db0JTTIpu0wB+Y1QQw==", + "version": "1.19.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.0.tgz", + "integrity": "sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA==", "dev": true, "license": "ISC", "dependencies": { @@ -4184,13 +4255,19 @@ } }, "node_modules/for-each": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", - "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.4.tgz", + "integrity": 
"sha512-kKaIINnFpzW6ffJNDjjyjrk21BkDx38c0xa/klsT8VzLCaMEefv4ZTacrcVR4DmgTeBra++jMDAfS/tS799YDw==", "dev": true, "license": "MIT", "dependencies": { - "is-callable": "^1.1.3" + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/form-data": { @@ -4665,12 +4742,13 @@ "license": "MIT" }, "node_modules/is-async-function": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.0.tgz", - "integrity": "sha512-GExz9MtyhlZyXYLxzlJRj5WUCE661zhDa1Yna52CN57AJsymh+DvXXjyveSioqSRdxvUrdKdvqB1b5cVKsNpWQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", "dev": true, "license": "MIT", "dependencies": { + "async-function": "^1.0.0", "call-bound": "^1.0.3", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", @@ -5207,6 +5285,12 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "license": "MIT" + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -5654,9 +5738,9 @@ } }, "node_modules/postcss": { - "version": "8.4.49", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", - "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", + "version": "8.5.1", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz", + "integrity": "sha512-6oz2beyjc5VMn/KV1pPw8fliQkhBXrVn1Z3TVyqZxU8kZpzEKhBdmCFqI6ZbmGtamQvQGuU1sgPTk8ZrXDD7jQ==", "dev": true, "funding": [ { 
@@ -5674,7 +5758,7 @@ ], "license": "MIT", "dependencies": { - "nanoid": "^3.3.7", + "nanoid": "^3.3.8", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" }, @@ -5826,35 +5910,43 @@ } }, "node_modules/react-router": { - "version": "6.28.1", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.28.1.tgz", - "integrity": "sha512-2omQTA3rkMljmrvvo6WtewGdVh45SpL9hGiCI9uUrwGGfNFDIvGK4gYJsKlJoNVi6AQZcopSCballL+QGOm7fA==", + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.1.5.tgz", + "integrity": "sha512-8BUF+hZEU4/z/JD201yK6S+UYhsf58bzYIDq2NS1iGpwxSXDu7F+DeGSkIXMFBuHZB21FSiCzEcUb18cQNdRkA==", "license": "MIT", "dependencies": { - "@remix-run/router": "1.21.0" + "@types/cookie": "^0.6.0", + "cookie": "^1.0.1", + "set-cookie-parser": "^2.6.0", + "turbo-stream": "2.4.0" }, "engines": { - "node": ">=14.0.0" + "node": ">=20.0.0" }, "peerDependencies": { - "react": ">=16.8" + "react": ">=18", + "react-dom": ">=18" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } } }, "node_modules/react-router-dom": { - "version": "6.28.1", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.28.1.tgz", - "integrity": "sha512-YraE27C/RdjcZwl5UCqF/ffXnZDxpJdk9Q6jw38SZHjXs7NNdpViq2l2c7fO7+4uWaEfcwfGCv3RSg4e1By/fQ==", + "version": "7.1.5", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.1.5.tgz", + "integrity": "sha512-/4f9+up0Qv92D3bB8iN5P1s3oHAepSGa9h5k6tpTFlixTTskJZwKGhJ6vRJ277tLD1zuaZTt95hyGWV1Z37csQ==", "license": "MIT", "dependencies": { - "@remix-run/router": "1.21.0", - "react-router": "6.28.1" + "react-router": "7.1.5" }, "engines": { - "node": ">=14.0.0" + "node": ">=20.0.0" }, "peerDependencies": { - "react": ">=16.8", - "react-dom": ">=16.8" + "react": ">=18", + "react-dom": ">=18" } }, "node_modules/react-syntax-highlighter": { @@ -5891,12 +5983,12 @@ } }, "node_modules/readdirp": { - "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/readdirp/-/readdirp-4.0.2.tgz", - "integrity": "sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.1.tgz", + "integrity": "sha512-h80JrZu/MHUZCyHu5ciuoI0+WxsCxzxJTILn6Fs8rxSnFPh+UVHYfeIxK1nVGugMqkfC4vJcBOYbkfkwYK0+gw==", "license": "MIT", "engines": { - "node": ">= 14.16.0" + "node": ">= 14.18.0" }, "funding": { "type": "individual", @@ -6024,9 +6116,9 @@ "license": "Unlicense" }, "node_modules/rollup": { - "version": "4.30.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.30.1.tgz", - "integrity": "sha512-mlJ4glW020fPuLi7DkM/lN97mYEZGWeqBnrljzN0gs7GLctqX3lNWxKQ7Gl712UAX+6fog/L3jh4gb7R6aVi3w==", + "version": "4.34.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.34.0.tgz", + "integrity": "sha512-+4C/cgJ9w6sudisA0nZz0+O7lTP9a3CzNLsoDwaRumM8QHwghUsu6tqHXiTmNUp/rqNiM14++7dkzHDyCRs0Jg==", "dev": true, "license": "MIT", "dependencies": { @@ -6040,25 +6132,25 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.30.1", - "@rollup/rollup-android-arm64": "4.30.1", - "@rollup/rollup-darwin-arm64": "4.30.1", - "@rollup/rollup-darwin-x64": "4.30.1", - "@rollup/rollup-freebsd-arm64": "4.30.1", - "@rollup/rollup-freebsd-x64": "4.30.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.30.1", - "@rollup/rollup-linux-arm-musleabihf": "4.30.1", - "@rollup/rollup-linux-arm64-gnu": "4.30.1", - "@rollup/rollup-linux-arm64-musl": "4.30.1", - "@rollup/rollup-linux-loongarch64-gnu": "4.30.1", - "@rollup/rollup-linux-powerpc64le-gnu": "4.30.1", - "@rollup/rollup-linux-riscv64-gnu": "4.30.1", - "@rollup/rollup-linux-s390x-gnu": "4.30.1", - "@rollup/rollup-linux-x64-gnu": "4.30.1", - "@rollup/rollup-linux-x64-musl": "4.30.1", - "@rollup/rollup-win32-arm64-msvc": "4.30.1", - "@rollup/rollup-win32-ia32-msvc": "4.30.1", - "@rollup/rollup-win32-x64-msvc": "4.30.1", + 
"@rollup/rollup-android-arm-eabi": "4.34.0", + "@rollup/rollup-android-arm64": "4.34.0", + "@rollup/rollup-darwin-arm64": "4.34.0", + "@rollup/rollup-darwin-x64": "4.34.0", + "@rollup/rollup-freebsd-arm64": "4.34.0", + "@rollup/rollup-freebsd-x64": "4.34.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.34.0", + "@rollup/rollup-linux-arm-musleabihf": "4.34.0", + "@rollup/rollup-linux-arm64-gnu": "4.34.0", + "@rollup/rollup-linux-arm64-musl": "4.34.0", + "@rollup/rollup-linux-loongarch64-gnu": "4.34.0", + "@rollup/rollup-linux-powerpc64le-gnu": "4.34.0", + "@rollup/rollup-linux-riscv64-gnu": "4.34.0", + "@rollup/rollup-linux-s390x-gnu": "4.34.0", + "@rollup/rollup-linux-x64-gnu": "4.34.0", + "@rollup/rollup-linux-x64-musl": "4.34.0", + "@rollup/rollup-win32-arm64-msvc": "4.34.0", + "@rollup/rollup-win32-ia32-msvc": "4.34.0", + "@rollup/rollup-win32-x64-msvc": "4.34.0", "fsevents": "~2.3.2" } }, @@ -6142,9 +6234,9 @@ } }, "node_modules/sass": { - "version": "1.83.1", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.83.1.tgz", - "integrity": "sha512-EVJbDaEs4Rr3F0glJzFSOvtg2/oy2V/YrGFPqPY24UqcLDWcI9ZY5sN+qyO3c/QCZwzgfirvhXvINiJCE/OLcA==", + "version": "1.83.4", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.83.4.tgz", + "integrity": "sha512-B1bozCeNQiOgDcLd33e2Cs2U60wZwjUUXzh900ZyQF5qUasvMdDZYbQ566LJu7cqR+sAHlAfO6RMkaID5s6qpA==", "license": "MIT", "dependencies": { "chokidar": "^4.0.0", @@ -6171,9 +6263,9 @@ } }, "node_modules/semver": { - "version": "7.6.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", - "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "version": "7.7.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.0.tgz", + "integrity": "sha512-DrfFnPzblFmNrIZzg5RzHegbiRWg7KMR7btwi2yjHwx06zsUbO5g613sVwEV7FTwmzJu+Io0lJe2GJ3LxqpvBQ==", "dev": true, "license": "ISC", "bin": { @@ -6183,6 +6275,12 @@ "node": ">=10" } }, + 
"node_modules/set-cookie-parser": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", + "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", + "license": "MIT" + }, "node_modules/set-function-length": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", @@ -6515,9 +6613,9 @@ } }, "node_modules/ts-api-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.0.tgz", - "integrity": "sha512-xCt/TOAc+EOHS1XPnijD3/yzpH6qg2xppZO1YDqGoVsNXfQfzHpOdNuXwrwOU8u4ITXJyDCTyt8w5g1sZv9ynQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.1.tgz", + "integrity": "sha512-dnlgjFSVetynI8nzgJ+qF62efpglpWRk8isUEWZGWlJYySCTD6aKvbUDu+zbPeDakk3bg5H4XpitHukgfL1m9w==", "dev": true, "license": "MIT", "engines": { @@ -6527,6 +6625,12 @@ "typescript": ">=4.8.4" } }, + "node_modules/turbo-stream": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", + "integrity": "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g==", + "license": "ISC" + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -6633,15 +6737,15 @@ } }, "node_modules/typescript-eslint": { - "version": "8.19.1", - "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.19.1.tgz", - "integrity": "sha512-LKPUQpdEMVOeKluHi8md7rwLcoXHhwvWp3x+sJkMuq3gGm9yaYJtPo8sRZSblMFJ5pcOGCAak/scKf1mvZDlQw==", + "version": "8.22.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.22.0.tgz", + "integrity": "sha512-Y2rj210FW1Wb6TWXzQc5+P+EWI9/zdS57hLEc0gnyuvdzWo8+Y8brKlbj0muejonhMI/xAZCnZZwjbIfv1CkOw==", "dev": true, "license": "MIT", 
"dependencies": { - "@typescript-eslint/eslint-plugin": "8.19.1", - "@typescript-eslint/parser": "8.19.1", - "@typescript-eslint/utils": "8.19.1" + "@typescript-eslint/eslint-plugin": "8.22.0", + "@typescript-eslint/parser": "8.22.0", + "@typescript-eslint/utils": "8.22.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -6716,21 +6820,21 @@ } }, "node_modules/vite": { - "version": "5.4.11", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.11.tgz", - "integrity": "sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q==", + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.11.tgz", + "integrity": "sha512-4VL9mQPKoHy4+FE0NnRE/kbY51TOfaknxAjt3fJbGJxhIpBZiqVzlZDEesWWsuREXHwNdAoOFZ9MkPEVXczHwg==", "dev": true, "license": "MIT", "dependencies": { - "esbuild": "^0.21.3", - "postcss": "^8.4.43", - "rollup": "^4.20.0" + "esbuild": "^0.24.2", + "postcss": "^8.4.49", + "rollup": "^4.23.0" }, "bin": { "vite": "bin/vite.js" }, "engines": { - "node": "^18.0.0 || >=20.0.0" + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" @@ -6739,19 +6843,25 @@ "fsevents": "~2.3.3" }, "peerDependencies": { - "@types/node": "^18.0.0 || >=20.0.0", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "sass-embedded": "*", "stylus": "*", "sugarss": "*", - "terser": "^5.4.0" + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" }, "peerDependenciesMeta": { "@types/node": { "optional": true }, + "jiti": { + "optional": true + }, "less": { "optional": true }, @@ -6772,6 +6882,12 @@ }, "terser": { "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true } } }, @@ -6906,12 +7022,18 @@ "license": "ISC" }, "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": 
"sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.7.0.tgz", + "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==", + "dev": true, "license": "ISC", + "optional": true, + "peer": true, + "bin": { + "yaml": "bin.mjs" + }, "engines": { - "node": ">= 6" + "node": ">= 14" } }, "node_modules/yocto-queue": { diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/package.json b/core/trino-web-ui/src/main/resources/webapp-preview/package.json index fbffeec2ede1..6ff4e735a41b 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/package.json +++ b/core/trino-web-ui/src/main/resources/webapp-preview/package.json @@ -16,37 +16,39 @@ "check:clean": "npm clean-install && npm run lint && npm run prettier:check" }, "dependencies": { - "@emotion/react": "^11.13.3", - "@emotion/styled": "^11.13.0", - "@fontsource/roboto": "^5.1.0", - "@mui/icons-material": "^6.1.7", - "@mui/material": "^6.1.7", - "@mui/x-charts": "^7.22.2", - "axios": "^1.7.7", + "@emotion/react": "^11.14.0", + "@emotion/styled": "^11.14.0", + "@fontsource/roboto": "^5.1.1", + "@mui/icons-material": "^6.4.2", + "@mui/material": "^6.4.2", + "@mui/x-charts": "^7.25.0", + "axios": "^1.7.9", + "lodash": "^4.17.21", "react": "^18.3.1", "react-dom": "^18.3.1", - "react-router-dom": "^6.28.0", + "react-router-dom": "^7.1.5", "react-syntax-highlighter": "^15.6.1", - "sass": "^1.81.0", - "zustand": "^5.0.1" + "sass": "^1.83.4", + "zustand": "^5.0.3" }, "devDependencies": { "@eslint/js": "^9.14.0", "@types/eslint__js": "^8.42.3", - "@types/react": "^18.3.12", - "@types/react-dom": "^18.3.1", + "@types/lodash": "^4.17.15", + "@types/react": "^18.3.18", + "@types/react-dom": "^18.3.5", "@types/react-syntax-highlighter": "^15.5.13", "@typescript-eslint/eslint-plugin": "^8.14.0", "@typescript-eslint/parser": "^8.14.0", - 
"@vitejs/plugin-react": "^4.3.3", - "eslint": "^9.15.0", - "eslint-plugin-react": "^7.37.2", - "eslint-plugin-react-hooks": "^5.0.0", - "eslint-plugin-react-refresh": "^0.4.14", - "globals": "^15.12.0", - "prettier": "^3.3.3", - "typescript": "^5.6.3", - "typescript-eslint": "^8.14.0", - "vite": "^5.4.11" + "@vitejs/plugin-react": "^4.3.4", + "eslint": "^9.19.0", + "eslint-plugin-react": "^7.37.4", + "eslint-plugin-react-hooks": "^5.1.0", + "eslint-plugin-react-refresh": "^0.4.18", + "globals": "^15.14.0", + "prettier": "^3.4.2", + "typescript": "^5.7.3", + "typescript-eslint": "^8.22.0", + "vite": "^6.0.11" } } diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/api/webapp/api.ts b/core/trino-web-ui/src/main/resources/webapp-preview/src/api/webapp/api.ts index 48d20f42c2e7..5a63c5f160c2 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/api/webapp/api.ts +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/api/webapp/api.ts @@ -78,6 +78,66 @@ export interface WorkerStatusInfo { uptime: string } +export interface QueryStats { + analysisTime: string + blockedDrivers: number + completedDrivers: number + createTime: string + cumulativeUserMemory: number + elapsedTime: string + endTime: string + executionTime: string + failedCpuTime: string + failedCumulativeUserMemory: number + failedScheduledTime: string + failedTasks: number + finishingTime: string + fullyBlocked: boolean + internalNetworkInputDataSize: string + peakTotalMemoryReservation: string + peakUserMemoryReservation: string + physicalInputDataSize: string + physicalInputReadTime: string + physicalWrittenDataSize: string + planningTime: string + progressPercentage: number + queuedDrivers: number + queuedTime: string + rawInputDataSize: string + rawInputPositions: number + runningDrivers: number + runningPercentage: number + spilledDataSize: string + totalCpuTime: string + totalDrivers: number + totalMemoryReservation: string + totalScheduledTime: string + 
userMemoryReservation: string + blockedReasons: string[] +} + +export interface QueryInfo { + clientTags: string[] + queryId: string + queryStats: QueryStats + queryTextPreview: string + queryType: string + resourceGroupId: string[] + retryPolicy: string + scheduled: boolean + self: string + sessionPrincipal: string + sessionSource: string + sessionUser: string + state: string + memoryPool: string + queryDataEncoding: string + errorType: string + errorCode: { + name: string + } +} + export async function statsApi(): Promise> { return await api.get('/ui/api/stats') } @@ -89,3 +149,7 @@ export async function workerApi(): Promise> { export async function workerStatusApi(nodeId: string): Promise> { return await api.get(`/ui/api/worker/${nodeId}/status`) } + +export async function queryApi(): Promise> { + return await api.get('/ui/api/query') +} diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/CodeBlock.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/CodeBlock.tsx index 531564a85504..6e010e3b73f9 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/CodeBlock.tsx +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/CodeBlock.tsx @@ -13,24 +13,25 @@ */ import { Box, useMediaQuery } from '@mui/material' import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter' -import { materialLight, materialDark } from 'react-syntax-highlighter/dist/esm/styles/prism' +import { materialLight, a11yDark } from 'react-syntax-highlighter/dist/esm/styles/prism' import { Theme as ThemeStore, useConfigStore } from '../store' export interface ICodeBlockProps { code: string language: string + height?: string } export const CodeBlock = (props: ICodeBlockProps) => { const config = useConfigStore() - const { code, language } = props + const { code, language, height } = props const prefersDarkMode = useMediaQuery('(prefers-color-scheme: dark)') const styleToUse = () => { if (config.theme 
=== ThemeStore.Auto) { - return prefersDarkMode ? materialDark : materialLight + return prefersDarkMode ? a11yDark : materialLight } else if (config.theme === ThemeStore.Dark) { - return materialDark + return a11yDark } else { return materialLight } @@ -38,19 +39,32 @@ export const CodeBlock = (props: ICodeBlockProps) => { return ( ({ padding: 0, borderRadius: 0, - backgroundColor: '#f5f5f5', - overflow: 'auto', - maxHeight: '400px', - border: '1px solid #ddd', - }} + border: `1px solid ${theme.palette.mode === 'dark' ? '#3f3f3f' : '#ddd'}`, + borderBottom: 'none', + width: '100%', + height: { + xs: '100%', + lg: height, + }, + })} > {code} diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/Dashboard.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/Dashboard.tsx index 701c287b4274..da8bdc07de0c 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/Dashboard.tsx +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/Dashboard.tsx @@ -13,8 +13,9 @@ */ import { useEffect, useState } from 'react' import Typography from '@mui/material/Typography' -import { Box, Grid2 as Grid } from '@mui/material' +import { Box, Divider, Grid2 as Grid } from '@mui/material' import { MetricCard } from './MetricCard.tsx' +import { QueryList } from './QueryList.tsx' import { useSnackbar } from './SnackbarContext.ts' import { ApiResponse } from '../api/base.ts' import { statsApi, Stats } from '../api/webapp/api.ts' @@ -69,9 +70,11 @@ export const Dashboard = () => { const [error, setError] = useState(null) useEffect(() => { - getClusterStats() - const intervalId = setInterval(getClusterStats, 1000) - return () => clearInterval(intervalId) + const runLoop = () => { + getClusterStats() + setTimeout(runLoop, 1000) + } + runLoop() // eslint-disable-next-line react-hooks/exhaustive-deps }, []) @@ -89,7 +92,7 @@ export const Dashboard = () => { setClusterStats((prevClusterStats) => { let 
newRowInputRate: number[] = initialFilledHistory let newByteInputRate: number[] = initialFilledHistory - let newPerWorkerCpuTimeRate: number[] = [] + let newPerWorkerCpuTimeRate: number[] = initialFilledHistory if (prevClusterStats.lastRefresh !== null) { const rowsInputSinceRefresh = newClusterStats.totalInputRows - prevClusterStats.lastInputRows const bytesInputSinceRefresh = newClusterStats.totalInputBytes - prevClusterStats.lastInputBytes @@ -97,15 +100,15 @@ export const Dashboard = () => { const secsSinceRefresh = (Date.now() - prevClusterStats.lastRefresh) / 1000.0 newRowInputRate = addExponentiallyWeightedToHistory( - rowsInputSinceRefresh / secsSinceRefresh, + rowsInputSinceRefresh / (secsSinceRefresh || 1), prevClusterStats.rowInputRate ) newByteInputRate = addExponentiallyWeightedToHistory( - bytesInputSinceRefresh / secsSinceRefresh, + bytesInputSinceRefresh / (secsSinceRefresh || 1), prevClusterStats.byteInputRate ) newPerWorkerCpuTimeRate = addExponentiallyWeightedToHistory( - cpuTimeSinceRefresh / newClusterStats.activeWorkers / secsSinceRefresh, + cpuTimeSinceRefresh / (newClusterStats.activeWorkers || 1) / (secsSinceRefresh || 1), prevClusterStats.perWorkerCpuTimeRate ) } @@ -151,7 +154,7 @@ export const Dashboard = () => { Cluster Overview - + <> @@ -184,7 +187,7 @@ export const Dashboard = () => { @@ -197,7 +200,13 @@ export const Dashboard = () => { /> - + + + Query Details + + + + ) } diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/DebouncedTextField.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/DebouncedTextField.tsx new file mode 100644 index 000000000000..924e6c13b863 --- /dev/null +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/DebouncedTextField.tsx @@ -0,0 +1,45 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import React, { useState, useEffect, useMemo, forwardRef } from 'react' +import TextField, { TextFieldProps } from '@mui/material/TextField' +import { debounce } from 'lodash' + +type DebouncedTextFieldProps = TextFieldProps & { + debounceTime?: number +} + +export const DebouncedTextField = forwardRef( + ({ onChange, debounceTime = 500, value: propValue, ...props }, ref) => { + const [value, setValue] = useState((propValue as string) ?? '') + + useEffect(() => { + setValue((propValue as string) ?? '') + }, [propValue]) + + const debouncedChangeHandler = useMemo(() => { + return debounce((event: React.ChangeEvent) => { + if (onChange) { + onChange(event) + } + }, debounceTime) + }, [debounceTime, onChange]) + + const handleChange = (event: React.ChangeEvent) => { + setValue(event.target.value) + debouncedChangeHandler(event) + } + + return + } +) diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/LinearProgressWithLabel.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/LinearProgressWithLabel.tsx new file mode 100644 index 000000000000..1a4b20950833 --- /dev/null +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/LinearProgressWithLabel.tsx @@ -0,0 +1,44 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { Box, LinearProgress, Typography } from '@mui/material' +import Chip, { ChipProps } from '@mui/material/Chip' + +export interface LinearProgressWithLabelProps { + value: number + title: string + color?: ChipProps['color'] +} + +export const LinearProgressWithLabel = (props: LinearProgressWithLabelProps) => { + const { value, title, color } = props + + return ( + + + + + <> + + + + + {`${Math.round(props.value)}%`} + + + + ) +} diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/MetricCard.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/MetricCard.tsx index 9f9278038a6a..7e0c73b52252 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/MetricCard.tsx +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/MetricCard.tsx @@ -12,10 +12,7 @@ * limitations under the License. 
*/ import { Link as RouterLink } from 'react-router-dom' -import Card from '@mui/material/Card' -import CardContent from '@mui/material/CardContent' -import Typography from '@mui/material/Typography' -import { Grid2 as Grid } from '@mui/material' +import { Card, CardActionArea, CardContent, Grid2 as Grid, Typography } from '@mui/material' import { SparkLineChart } from '@mui/x-charts/SparkLineChart' import { styled } from '@mui/material/styles' @@ -37,50 +34,41 @@ const StyledLink = styled(RouterLink)(({ theme }) => ({ export const MetricCard = (props: IMetricCardProps) => { const { title, values, numberFormatter, link } = props const lastValue = values[values.length - 1] + const maxValue = Math.max(...values, 1) return ( - - - {link ? ( - - - {title} - - - ) : ( - - {title} - - )} - - - - + + + + + + {link ? ( + + {title} + + ) : ( + + {title} + + )} + + + {numberFormatter ? numberFormatter(lastValue) : lastValue} - - - + { /> - - + + ) } diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryList.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryList.tsx new file mode 100644 index 000000000000..0c069061c30d --- /dev/null +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryList.tsx @@ -0,0 +1,526 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { useEffect, useState } from 'react' +import { + Alert, + Box, + Checkbox, + CircularProgress, + Divider, + FormControl, + Grid2 as Grid, + MenuItem, + InputLabel, + ListItemText, + Select, + Stack, + Typography, +} from '@mui/material' +import { DebouncedTextField } from './DebouncedTextField.tsx' +import { QueryListItem } from './QueryListItem.tsx' +import { ApiResponse } from '../api/base.ts' +import { Texts } from '../constant.ts' +import { QueryInfo, queryApi } from '../api/webapp/api.ts' +import { getHumanReadableState, parseDataSize, parseDuration } from '../utils/utils.ts' + +const STATE_TYPE = { + RUNNING: (queryInfo: QueryInfo) => + !(queryInfo.state === 'QUEUED' || queryInfo.state === 'FINISHED' || queryInfo.state === 'FAILED'), + QUEUED: (queryInfo: QueryInfo) => queryInfo.state === 'QUEUED', + FINISHED: (queryInfo: QueryInfo) => queryInfo.state === 'FINISHED', +} as const + +const ERROR_TYPE = { + USER_ERROR: (queryInfo: QueryInfo) => queryInfo.state === 'FAILED' && queryInfo.errorType === 'USER_ERROR', + INTERNAL_ERROR: (queryInfo: QueryInfo) => queryInfo.state === 'FAILED' && queryInfo.errorType === 'INTERNAL_ERROR', + INSUFFICIENT_RESOURCES: (queryInfo: QueryInfo) => + queryInfo.state === 'FAILED' && queryInfo.errorType === 'INSUFFICIENT_RESOURCES', + EXTERNAL: (queryInfo: QueryInfo) => queryInfo.state === 'FAILED' && queryInfo.errorType === 'EXTERNAL', +} as const + +const SORT_TYPE = { + CREATED: (queryInfo: QueryInfo) => Date.parse(queryInfo.queryStats.createTime), + ELAPSED: (queryInfo: QueryInfo) => parseDuration(queryInfo.queryStats.elapsedTime), + EXECUTION: (queryInfo: QueryInfo) => parseDuration(queryInfo.queryStats.executionTime), + CPU: (queryInfo: QueryInfo) => parseDuration(queryInfo.queryStats.totalCpuTime), + CUMULATIVE_MEMORY: (queryInfo: QueryInfo) => queryInfo.queryStats.cumulativeUserMemory, + CURRENT_MEMORY: (queryInfo: QueryInfo) => parseDataSize(queryInfo.queryStats.userMemoryReservation), +} as const + +const 
SORT_ORDER = { + ASCENDING: (value: string | number) => value, + DESCENDING: (value: string | number) => (typeof value === 'number' ? -value : value), +} as const + +type StateTypeKeys = keyof typeof STATE_TYPE +type ErrorTypeKeys = keyof typeof ERROR_TYPE +type SortTypeKeys = keyof typeof SORT_TYPE +type SortOrderKeys = keyof typeof SORT_ORDER + +function useLocalStorageState< + T extends string | number | StateTypeKeys[] | ErrorTypeKeys[] | SortTypeKeys[] | SortOrderKeys[], +>(key: string, defaultValue: T) { + const [state, setState] = useState(() => { + const storedValue = localStorage.getItem(key) + return storedValue !== null ? (JSON.parse(storedValue) as T) : defaultValue + }) + + useEffect(() => { + localStorage.setItem(key, JSON.stringify(state)) + }, [key, state]) + + return [state, setState] as const +} + +export const QueryList = () => { + const [allQueries, setAllQueries] = useState([]) + const [displayedQueries, setDisplayedQueries] = useState([]) + const [searchString, setSearchString] = useLocalStorageState('searchString', '' as string) + const [stateFilters, setStateFilters] = useLocalStorageState('stateFilters', [ + 'RUNNING', + 'QUEUED', + ] as (keyof typeof STATE_TYPE)[]) + const [errorTypeFilters, setErrorTypeFilters] = useLocalStorageState('errorTypeFilters', [ + 'INTERNAL_ERROR', + 'INSUFFICIENT_RESOURCES', + 'EXTERNAL', + ] as (keyof typeof ERROR_TYPE)[]) + const [sortType, setSortType] = useLocalStorageState('sortType', 'CREATED' as keyof typeof SORT_TYPE) + const [sortOrder, setSortOrder] = useLocalStorageState('sortOrder', 'DESCENDING' as keyof typeof SORT_ORDER) + const [reorderInterval, setReorderInterval] = useLocalStorageState('reorderInterval', 5000 as number) + const [maxQueries, setMaxQueries] = useLocalStorageState('maxQueries', 100 as number) + const [lastReorder, setLastReorder] = useState(Date.now()) + const [loading, setLoading] = useState(true) + const [error, setError] = useState(null) + + useEffect(() => { + let timeoutId: 
number + const runLoop = () => { + getQueryListStatus() + timeoutId = setTimeout(runLoop, 1000) + } + runLoop() + + return () => clearTimeout(timeoutId) + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [searchString, stateFilters, errorTypeFilters, sortType, sortOrder, reorderInterval, maxQueries]) + + const sortQueries = ( + incomingQueries: QueryInfo[], + incomingSortType: keyof typeof SORT_TYPE, + incomingSortOrder: keyof typeof SORT_ORDER + ) => { + incomingQueries.sort((queryA, queryB) => { + const valueA = SORT_TYPE[incomingSortType](queryA) + const valueB = SORT_TYPE[incomingSortType](queryB) + + if (typeof valueA === 'number' && typeof valueB === 'number') { + // @ts-expect-error TODO fix it without using any + return SORT_ORDER[incomingSortOrder](valueA) - SORT_ORDER[incomingSortOrder](valueB) + } + + return 0 + }) + } + + const sortAndLimitQueries = ( + incomingQueries: QueryInfo[], + incomingSortType: keyof typeof SORT_TYPE, + incomingSortOrder: keyof typeof SORT_ORDER, + incomingMaxQueries: number + ) => { + sortQueries(incomingQueries, incomingSortType, incomingSortOrder) + if (incomingQueries.length > incomingMaxQueries) { + incomingQueries.splice(incomingMaxQueries, incomingQueries.length - incomingMaxQueries) + } + } + + const filterQueries = ( + incomingQueries: QueryInfo[], + incomingStateFilters: (keyof typeof STATE_TYPE)[], + incomingErrorTypeFilters: (keyof typeof ERROR_TYPE)[], + incomingSearchString: string + ) => { + const stateFilteredQueries = incomingQueries.filter( + (query) => + incomingStateFilters.some((filter) => STATE_TYPE[filter](query)) || + incomingErrorTypeFilters.some((filter) => ERROR_TYPE[filter](query)) + ) + + return incomingSearchString === '' + ? 
stateFilteredQueries + : stateFilteredQueries.filter((query) => { + const term = incomingSearchString.toLowerCase() + return [ + query.queryId, + getHumanReadableState(query), + query.queryTextPreview, + query.sessionUser, + query.sessionSource, + query.resourceGroupId?.join('.'), + query.errorCode?.name, + ...(query.clientTags || []), + ].some((value) => value?.toLowerCase().includes(term)) + }) + } + + const getQueryListStatus = () => { + setError(null) + queryApi().then((apiResponse: ApiResponse) => { + setLoading(false) + if (apiResponse.status === 200 && apiResponse.data) { + const queriesList = apiResponse.data + + const queryMap = queriesList.reduce((map: Record, queryInfo: QueryInfo) => { + map[queryInfo.queryId] = queryInfo + return map + }, {}) + + let updatedQueries: QueryInfo[] = [] + displayedQueries.forEach((oldQuery: QueryInfo) => { + if (oldQuery.queryId in queryMap) { + updatedQueries.push(queryMap[oldQuery.queryId]) + delete queryMap[oldQuery.queryId] + } + }) + + let newQueries: QueryInfo[] = [] + for (const queryId in queryMap) { + if (queryMap[queryId]) { + newQueries.push(queryMap[queryId]) + } + } + + newQueries = filterQueries(newQueries, stateFilters, errorTypeFilters, searchString) + + const now: number = Date.now() + + if (reorderInterval !== 0 && now - lastReorder > reorderInterval) { + updatedQueries = filterQueries(updatedQueries, stateFilters, errorTypeFilters, searchString) + updatedQueries = updatedQueries.concat(newQueries) + sortQueries(updatedQueries, sortType, sortOrder) + setLastReorder(now) + } else { + sortQueries(newQueries, sortType, sortOrder) + updatedQueries = updatedQueries.concat(newQueries) + } + + if (maxQueries !== 0 && updatedQueries.length > maxQueries) { + updatedQueries.splice(maxQueries, updatedQueries.length - maxQueries) + } + + setAllQueries(queriesList) + setDisplayedQueries(updatedQueries) + } else { + setError(`${Texts.Error.Communication} ${apiResponse.status}: ${apiResponse.message}`) + } + }) + } + + 
const smallFormControlSx = { + fontSize: '0.8rem', + } + + const smallDropdownMenuPropsSx = { + PaperProps: { + sx: { + '& .MuiMenuItem-root': smallFormControlSx, + }, + }, + } + + const renderSearchStringTextField = () => { + const handleChange = (newSearchString: string) => { + setTimeout(() => { + const newDisplayedQueries = filterQueries(allQueries, stateFilters, errorTypeFilters, newSearchString) + sortAndLimitQueries(newDisplayedQueries, sortType, sortOrder, maxQueries) + + setSearchString(newSearchString) + setDisplayedQueries(newDisplayedQueries) + }) + } + + return ( + handleChange(event.target.value)} + debounceTime={500} + fullWidth + /> + ) + } + const renderStateTypeSelectItem = (newStateType: keyof typeof STATE_TYPE, filterText: string) => { + const handleClick = () => { + setTimeout(() => { + const newStateFilters = stateFilters.slice() + if (stateFilters.includes(newStateType)) { + newStateFilters.splice(newStateFilters.indexOf(newStateType), 1) + } else { + newStateFilters.push(newStateType) + } + const newDisplayedQueries = filterQueries(allQueries, newStateFilters, errorTypeFilters, searchString) + sortAndLimitQueries(newDisplayedQueries, sortType, sortOrder, maxQueries) + + setStateFilters(newStateFilters) + setDisplayedQueries(newDisplayedQueries) + }) + } + + return ( + + + {filterText}} /> + + ) + } + + const renderErrorTypeSelectItem = (newErrorType: keyof typeof ERROR_TYPE, errorTypeText: string) => { + const handleClick = () => { + setTimeout(() => { + const newErrorTypeFilters = errorTypeFilters.slice() + if (errorTypeFilters.includes(newErrorType)) { + newErrorTypeFilters.splice(newErrorTypeFilters.indexOf(newErrorType), 1) + } else { + newErrorTypeFilters.push(newErrorType) + } + const newDisplayedQueries = filterQueries(allQueries, stateFilters, newErrorTypeFilters, searchString) + sortAndLimitQueries(newDisplayedQueries, sortType, sortOrder, maxQueries) + + setErrorTypeFilters(newErrorTypeFilters) + 
setDisplayedQueries(newDisplayedQueries) + }) + } + + return ( + + + Failed - {errorTypeText}} /> + + ) + } + + const renderSortTypeSelectItem = (newSortType: keyof typeof SORT_TYPE, text: string) => { + const handleClick = () => { + setTimeout(() => { + const newDisplayedQueries = filterQueries(allQueries, stateFilters, errorTypeFilters, searchString) + sortAndLimitQueries(newDisplayedQueries, newSortType, sortOrder, maxQueries) + + setSortType(newSortType) + setDisplayedQueries(newDisplayedQueries) + }) + } + + return ( + + {text} + + ) + } + + const renderSortOrderSelectItem = (newSortOrder: keyof typeof SORT_ORDER, text: string) => { + const handleClick = () => { + setTimeout(() => { + const newDisplayedQueries = filterQueries(allQueries, stateFilters, errorTypeFilters, searchString) + sortAndLimitQueries(newDisplayedQueries, sortType, newSortOrder, maxQueries) + + setSortOrder(newSortOrder) + setDisplayedQueries(newDisplayedQueries) + }) + } + + return ( + + {text} + + ) + } + + const renderReorderIntervalSelectItem = (newReorderInterval: number, text: string) => { + const handleClick = () => { + setReorderInterval(newReorderInterval) + } + + return ( + + {text} + + ) + } + + const renderMaxQueriesSelectItem = (newMaxQueries: number, text: string) => { + const handleClick = () => { + setTimeout(() => { + const newDisplayedQueries = filterQueries(allQueries, stateFilters, errorTypeFilters, searchString) + sortAndLimitQueries(newDisplayedQueries, sortType, sortOrder, newMaxQueries) + + setMaxQueries(newMaxQueries) + setDisplayedQueries(newDisplayedQueries) + }) + } + + return ( + + {text} + + ) + } + + if (loading || error) { + return ( + + {loading ? 
( + + ) : ( + error && ( + + {Texts.Error.QueryListNotLoaded} - {error} + + ) + )} + + ) + } + + return ( + <> + + + {renderSearchStringTextField()} + + + + + {Texts.QueryList.Filter.State} + + + + {Texts.QueryList.Filter.SortBy} + + + + {Texts.QueryList.Filter.Ordering} + + + + {Texts.QueryList.Filter.ReorderInterval} + + + + {Texts.QueryList.Filter.Limit} + + + + + + + {displayedQueries.length > 0 ? ( + displayedQueries.map((queryInfo: QueryInfo) => ( + + + + + )) + ) : ( + + {allQueries.length === 0 ? Texts.QueryList.NoQueries : Texts.QueryList.NoMatchedFilterQueries} + + )} + + + ) +} diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryListItem.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryListItem.tsx new file mode 100644 index 000000000000..3d711d93f97f --- /dev/null +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryListItem.tsx @@ -0,0 +1,355 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import React from 'react' +import { Box, Grid2 as Grid, Stack, Tooltip, Typography } from '@mui/material' +import AvTimerIcon from '@mui/icons-material/AvTimer' +import BadgeIcon from '@mui/icons-material/Badge' +import BrokenImageIcon from '@mui/icons-material/BrokenImage' +import CheckCircleIcon from '@mui/icons-material/CheckCircle' +import DevicesIcon from '@mui/icons-material/Devices' +import DownloadingIcon from '@mui/icons-material/Downloading' +import FunctionsIcon from '@mui/icons-material/Functions' +import GroupsIcon from '@mui/icons-material/Groups' +import HighlightOff from '@mui/icons-material/HighlightOff' +import HistoryToggleOffIcon from '@mui/icons-material/HistoryToggleOff' +import Memory from '@mui/icons-material/Memory' +import NotStartedIcon from '@mui/icons-material/NotStarted' +import PlayCircleIcon from '@mui/icons-material/PlayCircle' +import QueryBuilderIcon from '@mui/icons-material/QueryBuilder' +import { CodeBlock } from './CodeBlock.tsx' +import { LinearProgressWithLabel, LinearProgressWithLabelProps } from './LinearProgressWithLabel.tsx' +import { QueryInfo } from '../api/webapp/api.ts' +import { + formatDataSizeBytes, + formatShortTime, + getHumanReadableState, + parseAndFormatDataSize, + truncateString, +} from '../utils/utils.ts' + +interface IQueryListItemProps { + queryInfo: QueryInfo +} + +export const QueryListItem = (props: IQueryListItemProps) => { + const { queryInfo } = props + + const STATE_COLOR_MAP: Record = { + QUEUED: 'default', + RUNNING: 'info', + PLANNING: 'info', + FINISHED: 'success', + BLOCKED: 'secondary', + USER_ERROR: 'error', + CANCELED: 'warning', + INSUFFICIENT_RESOURCES: 'error', + EXTERNAL_ERROR: 'error', + UNKNOWN_ERROR: 'error', + } + + const getQueryStateColor = (query: QueryInfo): LinearProgressWithLabelProps['color'] => { + switch (query.state) { + case 'QUEUED': + return STATE_COLOR_MAP.QUEUED + case 'PLANNING': + return STATE_COLOR_MAP.PLANNING + case 'STARTING': + case 'FINISHING': + case 
'RUNNING': + if (query.queryStats && query.queryStats.fullyBlocked) { + return STATE_COLOR_MAP.BLOCKED + } + return STATE_COLOR_MAP.RUNNING + case 'FAILED': + switch (query.errorType) { + case 'USER_ERROR': + if (query.errorCode.name === 'USER_CANCELED') { + return STATE_COLOR_MAP.CANCELED + } + return STATE_COLOR_MAP.USER_ERROR + case 'EXTERNAL': + return STATE_COLOR_MAP.EXTERNAL_ERROR + case 'INSUFFICIENT_RESOURCES': + return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES + default: + return STATE_COLOR_MAP.UNKNOWN_ERROR + } + case 'FINISHED': + return STATE_COLOR_MAP.FINISHED + default: + return STATE_COLOR_MAP.QUEUED + } + } + + const getProgressBarPercentage = (queryInfo: QueryInfo) => { + if (queryInfo.state !== 'RUNNING') { + return 100 + } + + const progress = queryInfo.queryStats.progressPercentage || 0 + return Math.round(progress) + } + + const getProgressBarTitle = (queryInfo: QueryInfo) => { + return getHumanReadableState(queryInfo) + } + + const stripQueryTextWhitespace = (queryText: string) => { + const maxLines = 6 + const lines = queryText.split('\n') + let minLeadingWhitespace = -1 + for (let i = 0; i < lines.length; i++) { + if (minLeadingWhitespace === 0) { + break + } + + if (lines[i].trim().length === 0) { + continue + } + + const leadingWhitespace = lines[i].search(/\S/) + + if (leadingWhitespace > -1 && (leadingWhitespace < minLeadingWhitespace || minLeadingWhitespace === -1)) { + minLeadingWhitespace = leadingWhitespace + } + } + + let formattedQueryText = '' + + for (let i = 0; i < lines.length; i++) { + const trimmedLine = lines[i].substring(minLeadingWhitespace).replace(/\s+$/g, '') + + if (trimmedLine.length > 0) { + formattedQueryText += trimmedLine + if (i < maxLines - 1) { + formattedQueryText += '\n' + } else { + formattedQueryText += '\n...' 
+ break + } + } + } + + return formattedQueryText + } + + const renderTextWithIcon = ( + icon: React.ReactElement, + title: string, + help: string, + spacing: number = 0, + color: string = 'inherit' + ) => { + const smallIcon = React.cloneElement(icon, { fontSize: 'small', color: color }) + + return ( + + + {smallIcon} + + {title} + + + + ) + } + + return ( + + + + + + {queryInfo.queryId} + + + + + + {formatShortTime(new Date(Date.parse(queryInfo.queryStats.createTime)))} + + + + + + + {renderTextWithIcon( + , + truncateString(queryInfo.sessionUser, 35), + 'User', + 1, + 'info' + )} + + + {renderTextWithIcon( + , + truncateString(queryInfo.sessionSource, 35), + 'Source', + 1, + 'info' + )} + + + {renderTextWithIcon( + , + queryInfo.queryDataEncoding ? 'spooled ' + queryInfo.queryDataEncoding : 'non-spooled', + 'Protocol Encoding', + 1, + 'info' + )} + + + {renderTextWithIcon( + , + truncateString(queryInfo.resourceGroupId ? queryInfo.resourceGroupId.join('.') : 'n/a', 35), + 'Resource Group', + 1, + 'info' + )} + + + + + {renderTextWithIcon( + , + queryInfo.queryStats.completedDrivers.toString(), + 'Complete Splits', + 1 + )} + + + {renderTextWithIcon( + , + (queryInfo.state === 'FINISHED' || queryInfo.state === 'FAILED' + ? 0 + : queryInfo.queryStats.runningDrivers + ).toString(), + 'Running splits', + 1 + )} + + + {renderTextWithIcon( + , + (queryInfo.state === 'FINISHED' || queryInfo.state === 'FAILED' + ? 
0 + : queryInfo.queryStats.queuedDrivers + ).toString(), + 'Queued splits', + 1 + )} + + {queryInfo.retryPolicy === 'TASK' && ( + + {renderTextWithIcon( + , + queryInfo.queryStats.failedTasks.toString(), + 'Failed tasks', + 1 + )} + + )} + + + + {renderTextWithIcon( + , + queryInfo.queryStats.executionTime.toString(), + 'Wall time spent executing the query (not including queued time)', + 1 + )} + + + {renderTextWithIcon( + , + queryInfo.queryStats.elapsedTime.toString(), + 'Total query wall time', + 1 + )} + + + {renderTextWithIcon( + , + queryInfo.queryStats.totalCpuTime.toString(), + 'CPU time spent by this query', + 1 + )} + + + + + {renderTextWithIcon( + , + parseAndFormatDataSize(queryInfo.queryStats.totalMemoryReservation), + 'Current total reserved memory', + 1 + )} + + + {renderTextWithIcon( + , + parseAndFormatDataSize(queryInfo.queryStats.peakTotalMemoryReservation), + 'Peak total memory', + 1 + )} + + + {renderTextWithIcon( + , + formatDataSizeBytes(queryInfo.queryStats.cumulativeUserMemory / 1000.0), + 'Cumulative user memory', + 1 + )} + + + + + + + + + + + + + + + ) +} diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkerStatus.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkerStatus.tsx index cf3273fcfb0a..83bd4f8eaad4 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkerStatus.tsx +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkerStatus.tsx @@ -63,9 +63,11 @@ export const WorkerStatus = () => { const [error, setError] = useState(null) useEffect(() => { - getWorkerStatus() - const intervalId = setInterval(getWorkerStatus, 1000) - return () => clearInterval(intervalId) + const runLoop = () => { + getWorkerStatus() + setTimeout(runLoop, 1000) + } + runLoop() // eslint-disable-next-line react-hooks/exhaustive-deps }, []) diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkersList.tsx 
b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkersList.tsx index 85d0673c47c0..aa5840c52cc9 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkersList.tsx +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/components/WorkersList.tsx @@ -44,9 +44,11 @@ export const WorkersList = () => { const [error, setError] = useState(null) useEffect(() => { - getWorkersList() - const intervalId = setInterval(getWorkersList, 1000) - return () => clearInterval(intervalId) + const runLoop = () => { + getWorkersList() + setTimeout(runLoop, 1000) + } + runLoop() }, []) useEffect(() => { diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/constant.ts b/core/trino-web-ui/src/main/resources/webapp-preview/src/constant.ts index 5858661cc600..4ad33f0c6370 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/constant.ts +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/constant.ts @@ -43,6 +43,7 @@ export const Texts = { Forbidden: 'Forbidden', Network: 'The network has wandered off, please try again later!', NodeInformationNotLoaded: 'Node information could not be loaded', + QueryListNotLoaded: 'Query list could not be loaded', }, Menu: { Header: { @@ -58,4 +59,24 @@ export const Texts = { DemoComponents: 'Demo Components', }, }, + QueryList: { + NoQueries: 'No Queries', + NoMatchedFilterQueries: 'No queries matched filters', + Filter: { + Search: 'Search', + SearchPlaceholder: + 'User, source, query ID, query state, resource group, error name, query text or client tags', + State: 'State', + Type: { + RUNNING: 'Running', + QUEUED: 'Queued', + FINISHED: 'Finished', + }, + + SortBy: 'SortBy', + Ordering: 'Ordering', + ReorderInterval: 'Reorder Interval', + Limit: 'Limit', + }, + }, } diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/router.tsx b/core/trino-web-ui/src/main/resources/webapp-preview/src/router.tsx index e22ffd21b888..f9540f14ed96 100644 --- 
a/core/trino-web-ui/src/main/resources/webapp-preview/src/router.tsx +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/router.tsx @@ -15,12 +15,10 @@ import { ReactNode } from 'react' import AppsOutlinedIcon from '@mui/icons-material/AppsOutlined' import HomeOutlinedIcon from '@mui/icons-material/HomeOutlined' import DnsOutlinedIcon from '@mui/icons-material/DnsOutlined' -import HistoryOutlinedIcon from '@mui/icons-material/HistoryOutlined' import { RouteProps } from 'react-router-dom' import { Dashboard } from './components/Dashboard' import { DemoComponents } from './components/DemoComponents' import { WorkersList } from './components/WorkersList.tsx' -import { QueryHistory } from './components/QueryHistory' import { Texts } from './constant' export interface RouterItem { @@ -51,15 +49,6 @@ export const routers: RouterItems = [ element: , }, }, - { - itemKey: 'query-history', - text: Texts.Menu.Drawer.QueryHistory, - icon: , - routeProps: { - path: '/query-history', - element: , - }, - }, { itemKey: 'demo-components', text: Texts.Menu.Drawer.DemoComponents, diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/utils/utils.ts b/core/trino-web-ui/src/main/resources/webapp-preview/src/utils/utils.ts index 2f59a3df82f3..eab57ed5fb3a 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/utils/utils.ts +++ b/core/trino-web-ui/src/main/resources/webapp-preview/src/utils/utils.ts @@ -11,6 +11,57 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +import { QueryInfo } from '../api/webapp/api.ts' + +export const getHumanReadableState = (queryInfo: QueryInfo) => { + if (queryInfo.state === 'RUNNING') { + let title = 'RUNNING' + + if (queryInfo.scheduled && queryInfo.queryStats.totalDrivers > 0 && queryInfo.queryStats.runningDrivers >= 0) { + if (queryInfo.queryStats.fullyBlocked) { + title = 'BLOCKED' + + if (queryInfo.queryStats.blockedReasons?.length > 0) { + title += ' (' + queryInfo.queryStats.blockedReasons.join(', ') + ')' + } + } + + if (queryInfo.memoryPool === 'reserved') { + title += ' (RESERVED)' + } + + return title + } + } + + if (queryInfo.state === 'FAILED') { + let errorMsg = '' + switch (queryInfo.errorType) { + case 'USER_ERROR': + errorMsg = 'USER ERROR' + if (queryInfo.errorCode.name === 'USER_CANCELED') { + errorMsg = 'USER CANCELED' + } + break + case 'INTERNAL_ERROR': + errorMsg = 'INTERNAL ERROR' + break + case 'INSUFFICIENT_RESOURCES': + errorMsg = 'INSUFFICIENT RESOURCES' + break + case 'EXTERNAL': + errorMsg = 'EXTERNAL ERROR' + break + } + if (queryInfo.errorCode && queryInfo.errorCode.name) { + errorMsg += ` - ${queryInfo.errorCode.name}` + } + return errorMsg + } + + return queryInfo.state +} + // Sparkline-related functions // =========================== @@ -39,6 +90,17 @@ export function addExponentiallyWeightedToHistory(value: number, valuesArray: nu return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0)) } +// Utility functions +// ================= + +export function truncateString(inputString: string, length: number): string { + if (inputString && inputString.length > length) { + return inputString.substring(0, length) + '...' 
+ } + + return inputString +} + export function precisionRound(n: number | null): string { if (n === null) { return '' @@ -124,3 +186,72 @@ function formatDataSizeMinUnit(size: number | null, minUnit: string): string { } return precisionRound(size) + unit } + +export function parseDataSize(value: string): number | null { + const DATA_SIZE_PATTERN = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/ + const match = DATA_SIZE_PATTERN.exec(value) + if (match === null) { + return null + } + const number = parseFloat(match[1]) + switch (match[2]) { + case 'B': + return number + case 'kB': + return number * Math.pow(2, 10) + case 'MB': + return number * Math.pow(2, 20) + case 'GB': + return number * Math.pow(2, 30) + case 'TB': + return number * Math.pow(2, 40) + case 'PB': + return number * Math.pow(2, 50) + default: + return null + } +} + +export function parseAndFormatDataSize(value: string): string { + const parsed = parseDataSize(value) + + if (parsed == null) { + return '' + } + + return formatDataSize(parsed) +} + +export function parseDuration(value: string): number | null { + const DURATION_PATTERN = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/ + + const match = DURATION_PATTERN.exec(value) + if (match === null) { + return null + } + const number = parseFloat(match[1]) + switch (match[2]) { + case 'ns': + return number / 1000000.0 + case 'us': + return number / 1000.0 + case 'ms': + return number + case 's': + return number * 1000 + case 'm': + return number * 1000 * 60 + case 'h': + return number * 1000 * 60 * 60 + case 'd': + return number * 1000 * 60 * 60 * 24 + default: + return null + } +} + +export function formatShortTime(date: Date): string { + const hours = date.getHours() % 12 || 12 + const minutes = (date.getMinutes() < 10 ? '0' : '') + date.getMinutes() + return hours + ':' + minutes + (date.getHours() >= 12 ? 
'pm' : 'am') +} diff --git a/docs/build b/docs/build index 17f35ba1de78..0260e2650a0b 100755 --- a/docs/build +++ b/docs/build @@ -13,5 +13,5 @@ docker run --security-opt label:disable --rm $OPTS -e TRINO_VERSION -u $(id -u): # Sort sitemap for reproducible builds sorted_sitemap="sitemap_${RANDOM}.xml" -docker run -v "$PWD":/docs $SPHINX_IMAGE xsltproc -o /docs/target/html/"${sorted_sitemap}" /docs/sitemap.xslt /docs/target/html/sitemap.xml +docker run --rm -v "$PWD":/docs $SPHINX_IMAGE xsltproc -o /docs/target/html/"${sorted_sitemap}" /docs/sitemap.xslt /docs/target/html/sitemap.xml mv target/html/"${sorted_sitemap}" target/html/sitemap.xml diff --git a/docs/pom.xml b/docs/pom.xml index f1e56a73a11a..8e99367bc21d 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT trino-docs diff --git a/docs/src/main/sphinx/admin.md b/docs/src/main/sphinx/admin.md index cb7879475eed..7f76ccb98da9 100644 --- a/docs/src/main/sphinx/admin.md +++ b/docs/src/main/sphinx/admin.md @@ -8,9 +8,12 @@ running, and managing Trino clusters. admin/web-interface admin/preview-web-interface +admin/logging admin/tuning admin/jmx admin/opentelemetry +admin/openmetrics +admin/properties admin/spill admin/resource-groups admin/session-property-managers diff --git a/docs/src/main/sphinx/admin/fault-tolerant-execution.md b/docs/src/main/sphinx/admin/fault-tolerant-execution.md index 65f3d9c849ec..8a88306d8630 100644 --- a/docs/src/main/sphinx/admin/fault-tolerant-execution.md +++ b/docs/src/main/sphinx/admin/fault-tolerant-execution.md @@ -507,6 +507,10 @@ the property may be configured for: - Block [data size](prop-type-data-size) for HDFS storage. - `4MB` - HDFS +* - `exchange.hdfs.skip-directory-scheme-validation` + - Skip directory scheme validation to support Hadoop-compatible file system. + - false + - HDFS * - `hdfs.config.resources` - Comma-separated list of paths to HDFS configuration files, for example `/etc/hdfs-site.xml`. 
The files must exist on all nodes in the Trino @@ -604,6 +608,15 @@ exchange.base-directories=hadoop-master:9000/exchange-spooling-directory hdfs.config.resources=/usr/lib/hadoop/etc/hadoop/core-site.xml ``` +When you want use Hadoop-compatible file system as the spooling storage location, +you should enable `exchange.hdfs.skip-directory-scheme-validation` in `exchange-manager.properties` +when configure `exchange.base-directories` with a specific scheme instead of `hdfs` and the following steps +may be necessary. + +1. Configure the `AbstractFileSystem` implementation in `core-site.xml`. +2. Add the relevant client JAR files into the directory `${Trino_HOME}/plugin/exchange-hdfs` +on all Trino cluster nodes. + (fte-exchange-local-filesystem)= #### Local filesystem storage diff --git a/docs/src/main/sphinx/admin/logging.md b/docs/src/main/sphinx/admin/logging.md new file mode 100644 index 000000000000..d693935e9e18 --- /dev/null +++ b/docs/src/main/sphinx/admin/logging.md @@ -0,0 +1,133 @@ + +# Logging + +Trino include numerous features to better understand and monitor a running +system, such as [](/admin/opentelemetry) or [](/admin/jmx). Logging and +configuring logging is one important aspect for operating and troubleshooting +Trino. + +(logging-configuration)= +## Configuration + +Trino application logging is optional and configured in the `log.properties` +file in your Trino installation `etc` configuration directory as set by the +[launcher](running-trino). + +Use it to add specific loggers and configure the minimum log levels. Every +logger has a name, which is typically the fully qualified name of the class that +uses the logger. Loggers have a hierarchy based on the dots in the name, like +Java packages. The four log levels are `DEBUG`, `INFO`, `WARN` and `ERROR`, +sorted by decreasing verbosity. 
+ +For example, consider the following log levels file: + +```properties +io.trino=WARN +io.trino.plugin.iceberg=DEBUG +io.trino.parquet=DEBUG +``` + +The preceding configuration sets the changes the level for all loggers in the +`io.trino` namespace to `WARN` as an update from the default `INFO` to make +logging less verbose. The example also increases logging verbosity for the +Iceberg connector using the `io.trino.plugin.iceberg` namespace, and the Parquet +file reader and writer support located in the `io.trino.parquet` namespace to +`DEBUG` for troubleshooting purposes. + +Additional loggers can include other package namespaces from libraries and +dependencies embedded within Trino or part of the Java runtime, for example: + +* `io.airlift` for the [Airlift](https://github.com/airlift/airlift) application + framework used by Trino. +* `org.eclipse.jetty` for the [Eclipse Jetty](https://jetty.org/) web server + used by Trino. +* `org.postgresql` for the [PostgresSQL JDBC driver](https://github.com/pgjdbc) + used by the PostgreSQL connector. +* `javax.net.ssl` for TLS from the Java runtime. +* `java.io` for I/O operations. + +There are numerous additional properties available to customize logging in +[](config-properties), with details documented in [](/admin/properties-logging) +and in following example sections. + +## Log output + +By default, logging output is file-based with rotated files in `var/log`: + +* `launcher.log` for logging out put from the application startup from the + [launcher](running-trino). Only used if the launcher starts Trino in the + background, and therefore not used in the Trino container. +* `http-request.log` for HTTP request logs, mostly from the [client + protocol](/client/client-protocol) and the [Web UI](/admin/web-interface). +* `server.log` for the main application log of Trino, including logging from all + plugins. 
+ +## JSON and TCP channel logging + +Trino supports logging to JSON-formatted output files with the configuration +`log.format=json`. Optionally you can set `node.annotations-file` as path to a +properties file such as the following example: + +```properties +host_ip=1.2.3.4 +service_name=trino +node_name=${ENV:MY_NODE_NAME} +pod_name=${ENV:MY_POD_NAME} +pod_namespace=${ENV:MY_POD_NAMESPACE} +``` + +The annotations file supports environment variable substitution, so that the +above example attaches the name of the Trino node as `pod_name` and other +information to every log line. When running Trino on Kubernetes, you have access +to [a lot of information to use in the +log](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/). + +TCP logging allows you to log to a TCP socket instead of a file with the +configuration `log.path=tcp://:`. The endpoint must be +available at the URL configured with `server_ip` and `server_port` and is +assumed to be stable. + +You can use an application such as [fluentbit](https://fluentbit.io/) as a +consumer for these JSON-formatted logs. + +Example fluentbit configuration file `config.yaml`: + +```yaml +pipeline: + inputs: + - name: tcp + tag: trino + listen: 0.0.0.0 + port: 5170 + buffer_size: 2048 + format: json + outputs: + - name: stdout + match: '*' +``` + +Start the application with the command: + +```shell +fluent-bit -c config.yaml +``` + +Use the following Trino properties configuration: + +```properties +log.path=tcp://localhost:5170 +log.format=json +node.annotation-file=etc/annotations.properties +``` + +File `etc/annotation.properties`: + +```properties +host_ip=1.2.3.4 +service_name=trino +pod_name=${ENV:HOSTNAME} +``` + +As a result, Trino logs appear as structured JSON log lines in fluentbit in the +user interface, and can also be [forwarded into a configured logging +system](https://docs.fluentbit.io/manual/pipeline/outputs). 
diff --git a/docs/src/main/sphinx/admin/openmetrics.md b/docs/src/main/sphinx/admin/openmetrics.md new file mode 100644 index 000000000000..8596e564ba05 --- /dev/null +++ b/docs/src/main/sphinx/admin/openmetrics.md @@ -0,0 +1,300 @@ +# Trino metrics with OpenMetrics + +Trino supports the metrics standard [OpenMetrics](https://openmetrics.io/), that +originated with the open-source systems monitoring and alerting toolkit +[Prometheus](https://prometheus.io/). + +Metrics are automatically enabled and available on the coordinator at the +`/metrics` endpoint. The endpoint is protected with the configured +[authentication](security-authentication), identical to the +[](/admin/web-interface) and the [](/client/client-protocol). + +For example, you can retrieve metrics data from an unsecured Trino server +running on `localhost:8080` with random username `example`: + +```shell +curl -H X-Trino-User:foo localhost:8080/metrics +``` + +The result follows the [OpenMetrics +specification](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md) +and looks similar to the following example output: + +``` +# TYPE io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_Min gauge +io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_Min NaN +# TYPE io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_P25 gauge +io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_P25 NaN +# TYPE io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_Total gauge +io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_Total 0.0 +# TYPE io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_P90 gauge +io_airlift_http_client_type_HttpClient_name_ForDiscoveryClient_CurrentResponseProcessTime_P90 NaN +``` + +The same data is 
available when using a browser, and logging manually. + +The user, `foo` in the example, must have read permission to system information +on a secured deployment, and the URL and port must be adjusted accordingly. + +Each Trino node, so the coordinator and all workers, provide separate metrics +independently. + +Use the property `openmetrics.jmx-object-names` in [](config-properties) to +define the JMX object names to include when retrieving all metrics. Multiple +object names are must be separated with `|`. Metrics use the package namespace +for any metric. Use `:*` to expose all metrics. Use `name` to select specific +classes or `type` for specific metric types. + +Examples: + +* `trino.plugin.exchange.filesystem:name=FileSystemExchangeStats` for metrics + from the `FileSystemExchangeStats` class in the + `trino.plugin.exchange.filesystem` package. +* `trino.plugin.exchange.filesystem.s3:name=S3FileSystemExchangeStorageStats` + for metrics from the `S3FileSystemExchangeStorageStats` class in the + `trino.plugin.exchange.filesystem.s3` package. +* `io.trino.hdfs:*` for all metrics in the `io.trino.hdfs` package. +* `java.lang:type=Memory` for all memory metrics in the `java.lang` package. + +Typically, Prometheus or a similar application is configured to monitor the +endpoint. The same application can then be used to inspect the metrics data. + +Trino also includes a [](/connector/prometheus) that allows you to query +Prometheus data using SQL. + +## Examples + +The following sections provide tips and tricks for your usage with small +examples. + +Other configurations with tools such as +[grafana-agent](https://grafana.com/docs/agent/latest/) or [grafana alloy +opentelemetry agent](https://grafana.com/docs/alloy/latest/) are also possible, +and can use platforms such as [Cortex](https://cortexmetrics.io/) or [Grafana +Mimir](https://grafana.com/oss/mimir/mimir) for metrics storage and related +monitoring and analysis. 
+ +### Simple example with Docker and Prometheus + +The following steps provide a simple demo setup to run +[Prometheus](https://prometheus.io/) and Trino locally in Docker containers. + +Create a shared network for both servers called `platform`: + +```shell +docker network create platform +``` + +Start Trino in the background: + +```shell +docker run -d \ + --name=trino \ + --network=platform \ + --network-alias=trino \ + -p 8080:8080 \ + trinodb/trino:latest +``` + +The preceding command starts Trino and adds it to the `platform` network with +the hostname `trino`. + +Create a `prometheus.yml` configuration file with the following content, that +point Prometheus at the `trino` hostname: + +```yaml +scrape_configs: +- job_name: trino + basic_auth: + username: trino-user + static_configs: + - targets: + - trino:8080 +``` + +Start Prometheus from the same directory as the configuration file: + +```shell +docker run -d \ + --name=prometheus \ + --network=platform \ + -p 9090:9090 \ + --mount type=bind,source=$PWD/prometheus.yml,target=/etc/prometheus/prometheus.yml \ + prom/prometheus +``` + +The preceding command adds Prometheus to the `platform` network. It also mounts +the configuration file into the container so that metrics from Trino are +gathered by Prometheus. + +Now everything is running. + +Install and run the [Trino CLI](/client/cli) or any other client application and +submit a query such as `SHOW CATALOGS;` or `SELECT * FROM tpch.tiny.nation;`. + +Optionally, log into the [Trino Web UI](/admin/web-interface) at +[http://localhost:8080](http://localhost:8080) with a random username. Press +the **Finished** button and inspect the details for the completed queries. + +Access the Prometheus UI at [http://localhost:9090/](http://localhost:9090/), +select **Status** > **Targets** and see the configured endpoint for Trino +metrics. 
+ +To see an example graph, select **Graph**, add the metric name +`trino_execution_name_QueryManager_RunningQueries` in the input field and press +**Execute**. Press **Table** for the raw data or **Graph** for a visualization. + +As a next step, run more queries and inspect the effect on the metrics. + +Once you are done you can stop the containers: + +```shell +docker stop prometheus +docker stop trino +``` + +You can start them again for further testing: + +```shell +docker start trino +docker start prometheus +``` + +Use the following commands to completely remove the network and containers: + +```shell +docker rm trino +docker rm prometheus +docker network rm platform +``` + +## Coordinator and worker metrics with Kubernetes + +To get a complete picture of the metrics on your cluster, you must access the +coordinator and the worker metrics. This section details tips for setting up for +this scenario with the [Trino Helm chart](https://github.com/trinodb/charts) on +Kubernetes. + +Add an annotation to flag all cluster nodes for scraping in your values for the +Trino Helm chart: + +```yaml +coordinator: + annotations: + prometheus.io/trino_scrape: "true" +worker: + annotations: + prometheus.io/trino_scrape: "true" +``` + +Configure metrics retrieval from the workers in your Prometheus configuration: + +```yaml + - job_name: trino-metrics-worker + scrape_interval: 10s + scrape_timeout: 10s + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_trino_scrape] + action: keep # scrape only pods with the trino scrape anotation + regex: true + - source_labels: [__meta_kubernetes_pod_container_name] + action: keep # dont try to scrape non trino container + regex: trino-worker + - action: hashmod + modulus: $(SHARDS) + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: $(SHARD) + source_labels: + - __tmp_hash + - source_labels: [__meta_kubernetes_pod_name] + action: 
replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: container + metric_relabel_configs: + - source_labels: [__name__] + regex: ".+_FifteenMinute.+|.+_FiveMinute.+|.+IterativeOptimizer.+|.*io_airlift_http_client_type_HttpClient.+" + action: drop # droping some highly granular metrics + - source_labels: [__meta_kubernetes_pod_name] + regex: ".+" + target_label: pod + action: replace + - source_labels: [__meta_kubernetes_pod_container_name] + regex: ".+" + target_label: container + action: replace + + scheme: http + tls_config: + insecure_skip_verify: true + basic_auth: + username: mysuer # replace with a user with system information permission + # DO NOT ADD PASSWORD +``` + +The worker authentication uses a user with access to the system information, yet +does not add a password and uses access via HTTP. + +Configure metrics retrieval from the coordinator in your Prometheus +configuration: + +```yaml + - job_name: trino-metrics-coordinator + scrape_interval: 10s + scrape_timeout: 10s + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_trino_scrape] + action: keep # scrape only pods with the trino scrape anotation + regex: true + - source_labels: [__meta_kubernetes_pod_container_name] + action: keep # dont try to scrape non trino container + regex: trino-coordinator + - action: hashmod + modulus: $(SHARDS) + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: $(SHARD) + source_labels: + - __tmp_hash + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_container_name] + action: replace + target_label: container + - action: replace # overide the address to the https ingress address + target_label: __address__ + replacement: {{ .Values.trinourl }} + metric_relabel_configs: + - source_labels: [__name__] + regex: 
".+_FifteenMinute.+|.+_FiveMinute.+|.+IterativeOptimizer.+|.*io_airlift_http_client_type_HttpClient.+" + action: drop # droping some highly granular metrics + - source_labels: [__meta_kubernetes_pod_name] + regex: ".+" + target_label: pod + action: replace + - source_labels: [__meta_kubernetes_pod_container_name] + regex: ".+" + target_label: container + action: replace + + scheme: https + tls_config: + insecure_skip_verify: true + basic_auth: + username: mysuer # replace with a user with system information permission + password_file: /some/password/file +``` + +The coordinator authentication uses a user with access to the system information +and requires authentication and access via HTTPS. diff --git a/docs/src/main/sphinx/admin/properties-client-protocol.md b/docs/src/main/sphinx/admin/properties-client-protocol.md index eca1a57bb06f..1f27833807b2 100644 --- a/docs/src/main/sphinx/admin/properties-client-protocol.md +++ b/docs/src/main/sphinx/admin/properties-client-protocol.md @@ -23,7 +23,11 @@ used automatically. - **Type:** [](prop-type-string) A required 256 bit, base64-encoded secret key used to secure spooled metadata -exchanged with the client. +exchanged with the client. Create a suitable value with the following command: + +```shell +openssl rand -base64 32 +``` ### `protocol.spooling.retrieval-mode` diff --git a/docs/src/main/sphinx/admin/properties-exchange.md b/docs/src/main/sphinx/admin/properties-exchange.md index 03f85e11724e..c49c3c0a158b 100644 --- a/docs/src/main/sphinx/admin/properties-exchange.md +++ b/docs/src/main/sphinx/admin/properties-exchange.md @@ -39,7 +39,7 @@ network utilization. 
- **Type:** {ref}`prop-type-string` - **Allowed values:** `NONE`, `LZ4`, `ZSTD` -- **Default value:** `LZ4` +- **Default value:** `NONE` The compression codec to use for [](file-compression) when exchanging data between nodes and the exchange storage with [](/admin/fault-tolerant-execution) diff --git a/docs/src/main/sphinx/client/client-protocol.md b/docs/src/main/sphinx/client/client-protocol.md index 793cf7ae0b7a..4091c5c954f0 100644 --- a/docs/src/main/sphinx/client/client-protocol.md +++ b/docs/src/main/sphinx/client/client-protocol.md @@ -17,6 +17,9 @@ The protocol is a sequence of REST API calls to the 6. The client and coordinator continue with steps 4. and 5. until all result set data is returned to the client or the client stops requesting more data. +7. If the client fails to fetch the result set, the coordinator does not initiate + further processing, fails the query, and returns a `USER_CANCELED` error. +8. The final response when the query is complete is `FINISHED`. The client protocol supports two modes. Configure the [spooling protocol](protocol-spooling) for optimal throughput for your clients. diff --git a/docs/src/main/sphinx/connector.md b/docs/src/main/sphinx/connector.md index c7e7a3b0402e..9c6259eb632f 100644 --- a/docs/src/main/sphinx/connector.md +++ b/docs/src/main/sphinx/connector.md @@ -26,6 +26,7 @@ Ignite JMX Kafka Kudu +Loki MariaDB Memory MongoDB diff --git a/docs/src/main/sphinx/connector/bigquery.md b/docs/src/main/sphinx/connector/bigquery.md index 11283e90bdb2..0fe31ba4c165 100644 --- a/docs/src/main/sphinx/connector/bigquery.md +++ b/docs/src/main/sphinx/connector/bigquery.md @@ -86,12 +86,12 @@ create the two catalogs, `sales` and `analytics` respectively. ### Arrow serialization support This is a feature which introduces support for using Apache Arrow -as the serialization format when reading from BigQuery. Please note there are -a few caveats: +as the serialization format when reading from BigQuery. 
Add the following +required, additional JVM argument to the [](jvm-config): -- Using Apache Arrow serialization is enabled by default. Add - `--add-opens=java.base/java.nio=ALL-UNNAMED` to the Trino - {ref}`jvm-config`. +```none +--add-opens=java.base/java.nio=ALL-UNNAMED +``` (bigquery-reading-from-views)= ### Reading from views diff --git a/docs/src/main/sphinx/connector/delta-lake.md b/docs/src/main/sphinx/connector/delta-lake.md index 0069b9fe65af..9ac117de3797 100644 --- a/docs/src/main/sphinx/connector/delta-lake.md +++ b/docs/src/main/sphinx/connector/delta-lake.md @@ -561,7 +561,7 @@ Write operations are supported for tables stored on the following systems: - S3 and S3-compatible storage - Writes to {doc}`Amazon S3 ` and S3-compatible storage must be enabled + Writes to Amazon S3 and S3-compatible storage must be enabled with the `delta.enable-non-concurrent-writes` property. Writes to S3 can safely be made from multiple Trino clusters; however, write collisions are not detected when writing concurrently from other Delta Lake engines. You must diff --git a/docs/src/main/sphinx/connector/faker.md b/docs/src/main/sphinx/connector/faker.md index 2a3f01cd9c09..bb4160e3c988 100644 --- a/docs/src/main/sphinx/connector/faker.md +++ b/docs/src/main/sphinx/connector/faker.md @@ -50,6 +50,15 @@ The following table details all general configuration properties: * - `faker.locale` - Default locale for generating character-based data, specified as a IETF BCP 47 language tag string. Defaults to `en`. +* - `faker.sequence-detection-enabled` + - If true, when creating a table using existing data, columns with the number + of distinct values close to the number of rows are treated as sequences. + Defaults to `true`. +* - `faker.dictionary-detection-enabled` + - If true, when creating a table using existing data, columns with a low + number of distinct values are treated as dictionaries, and get + the `allowed_values` column property populated with random values. 
+ Defaults to `true`. ::: The following table details all supported schema properties. If they're not @@ -66,6 +75,15 @@ set, values from corresponding configuration properties are used. them, in any table of this schema. * - `default_limit` - Default number of rows in a table. +* - `sequence_detection_enabled` + - If true, when creating a table using existing data, columns with the number + of distinct values close to the number of rows are treated as sequences. + Defaults to `true`. +* - `dictionary_detection_enabled` + - If true, when creating a table using existing data, columns with a low + number of distinct values are treated as dictionaries, and get + the `allowed_values` column property populated with random values. + Defaults to `true`. ::: The following table details all supported table properties. If they're not set, @@ -82,6 +100,15 @@ values from corresponding schema properties are used. `null` in the table. * - `default_limit` - Default number of rows in the table. +* - `sequence_detection_enabled` + - If true, when creating a table using existing data, columns with the number + of distinct values close to the number of rows are treated as sequences. + Defaults to `true`. +* - `dictionary_detection_enabled` + - If true, when creating a table using existing data, columns with a low + number of distinct values are treated as dictionaries, and get + the `allowed_values` column property populated with random values. + Defaults to `true`. ::: The following table details all supported column properties. @@ -245,7 +272,7 @@ operation](sql-read-operations) statements to generate data. 
To define the schema for generating data, it supports the following features: - [](/sql/create-table) -- [](/sql/create-table-as) +- [](/sql/create-table-as), see also [](faker-statistics) - [](/sql/drop-table) - [](/sql/create-schema) - [](/sql/drop-schema) @@ -317,3 +344,77 @@ CREATE TABLE generator.default.customer ( group_id INTEGER WITH (allowed_values = ARRAY['10', '32', '81']) ); ``` + +(faker-statistics)= +### Using existing data statistics + +The Faker connector automatically sets the `default_limit` table property, and +the `min`, `max`, and `null_probability` column properties, based on statistics +collected by scanning existing data read by Trino from the data source. The +connector uses these statistics to be able to generate data that is more similar +to the original data set, without using any of that data: + + +```sql +CREATE TABLE generator.default.customer AS +SELECT * +FROM production.public.customer +WHERE created_at > CURRENT_DATE - INTERVAL '1' YEAR; +``` + +Instead of using range, or other predicates, tables can be sampled, +see [](tablesample). + +When the `SELECT` statement doesn't contain a `WHERE` clause, a shorter notation +can be used: + +```sql +CREATE TABLE generator.default.customer AS TABLE production.public.customer; +``` + +The Faker connector detects sequence columns, which are integer column with the +number of distinct values almost equal to the number of rows in the table. For +such columns, Faker sets the `step` column property to 1. + +Sequence detection can be turned off using the `sequence_detection_enabled` +table, or schema property or in the connector configuration file, using the +`faker.sequence-detection-enabled` property. + +The Faker connector detects dictionary columns, which are columns of +non-character types with the number of distinct values lower or equal to 1000. +For such columns, Faker generates a list of random values to choose from, and +saves it in the `allowed_values` column property. 
+Dictionary detection can be turned off using the `dictionary_detection_enabled`
+table or schema property, or in the connector configuration file using
+the `faker.dictionary-detection-enabled` property.
+(iceberg-table-statistics)= ### Table statistics The Iceberg connector can collect column statistics using {doc}`/sql/analyze` diff --git a/docs/src/main/sphinx/connector/kafka.md b/docs/src/main/sphinx/connector/kafka.md index a417cf6a85b7..528e57b6b7ec 100644 --- a/docs/src/main/sphinx/connector/kafka.md +++ b/docs/src/main/sphinx/connector/kafka.md @@ -79,7 +79,7 @@ creates a catalog named `sales` using the configured connector. ### Log levels Kafka consumer logging can be verbose and pollute Trino logs. To lower the -{ref}`log level `, simply add the following to `etc/log.properties`: +[log level](logging-configuration), simply add the following to `etc/log.properties`: ```text org.apache.kafka=WARN diff --git a/docs/src/main/sphinx/connector/loki.md b/docs/src/main/sphinx/connector/loki.md new file mode 100644 index 000000000000..09c3847883be --- /dev/null +++ b/docs/src/main/sphinx/connector/loki.md @@ -0,0 +1,176 @@ +# Loki connector + +```{raw} html + +``` + +The Loki connector allows querying log data stored in [Grafana +Loki](https://grafana.com/oss/loki/). This document describes how to configure a +catalog with the Loki connector to run SQL queries against Loki. + +## Requirements + +To connect to Loki, you need: + +- Loki 3.1.0 or higher. +- Network access from the Trino coordinator and workers to Loki. Port 3100 is + the default port. + +## Configuration + +The connector can query log data in Loki. Create a catalog properties file that +specifies the Loki connector by setting the `connector.name` to `loki`. + +For example, to access a database as the `example` catalog, create the file +`etc/catalog/example.properties`. + +```text +connector.name=loki +loki.uri=http://loki.example.com:3100 +``` + +The following table contains a list of all available configuration properties. 
+ +:::{list-table} Loki configuration properties +:widths: 40, 60 +:header-rows: 1 + +* - Property name + - Description +* - `loki.uri` + - The URI endpoint for the Loki server that Trino cluster nodes use to access + the Loki APIs. +* - `loki.query-timeout` + - [Duration](prop-type-duration) that Trino waits for a result from Loki + before the specific query request times out. Defaults to `10s`. A minimum of + `1s` is required. +::: + +(loki-type-mapping)= +## Type mapping + +Because Trino and Loki each support types that the other does not, this +connector [modifies some types](type-mapping-overview) when reading data. + +### Loki to Trino type mapping + +Each log line in Loki is split up by the connector into three columns: + +* `timestamp` +* `values` +* `labels` + +These are separately mapped to Trino types: + +:::{list-table} Loki log entry to Trino type mapping +:widths: 40, 60 +:header-rows: 1 + +* - Loki type + - Trino type +* - `timestamp` + - `TIMESTAMP WITH TIME ZONE` +* - `values` for [log queries](https://grafana.com/docs/loki/latest/query/log_queries/) + - `VARCHAR` +* - `values` for [metrics queries](https://grafana.com/docs/loki/latest/query/metric_queries/) + - `DOUBLE` +* - `labels` + - `MAP` with label names and values as `VARCHAR` key value pairs +::: + +No other types are supported. + +(loki-sql-support)= +## SQL support + +The Loki connector does not provide access to any schema or tables. Instead you +must use the [query_range](loki-query-range) table function to return a table +representation of the desired log data. Use the data in the returned table like +any other table in a SQL query, including use of functions, joins, and other SQL +functionality. + +(lok-table-functions)= +### Table functions + +The connector provides the following [table function](/functions/table) to +access Loki. 
+ +(loki-query-range)= +### `query_range(varchar, timestamp, timestamp) -> table` + +The `query_range` function allows you to query the log data in Loki with the +following parameters: + +* The first parameter is a `varchar` string that uses valid + [LogQL](https://grafana.com/docs/loki/latest/query/) query. +* The second parameter is a `timestamp` formatted data and time representing the + start date and time of the log data range to query. +* The third parameter is a `timestamp` formatted data and time representing the + end date and time of the log data range to query. + +The table function is available in the `system` schema of the catalog using the +Loki connector, and returns a table with the columns `timestamp`, `value`, and +`labels` described in the [](loki-type-mapping) section. + +The following query invokes the `query_range` table function in the `example` +catalog. It uses the LogQL query string `{origin="CA"}` to retrieve all log data +with the value `CA` for the `origin` label on the log entries. The timestamp +parameters set a range of all log entries from the first of January 2025. + +```sql +SELECT timestamp, value +FROM + TABLE( + example.system.query_range( + '{origin="CA"}', + TIMESTAMP '2025-01-01 00:00:00', + TIMESTAMP '2025-01-02 00:00:00' + ) + ) +; +``` + +The query only returns the timestamp and value for each log entry, and omits the +label data in the `labels` column. The value is a `varchar` string since the +LoqQL query is a log query. + +## Examples + +The following examples show case combinations of +[LogQL](https://grafana.com/docs/loki/latest/query/) queries passed through the +table function with SQL accessing the data in the returned table. + +The following query uses a metrics query and therefore returns a `count` column +with double values, limiting the result data to the latest 100 values. 
+ +```sql +SELECT value AS count +FROM + TABLE( + example.system.query_range( + 'count_over_time({test="metrics_query"}[5m])', + TIMESTAMP '2025-01-01 00:00:00', + TIMESTAMP '2025-01-02 00:00:00' + ) + ) +ORDER BY timestamp DESC +LIMIT 100; +``` + +The following query accesses the value of the label named `province` and returns +it as separate column. + +```sql +SELECT + timestamp, + value, + labels['province'] AS province +FROM + TABLE( + example.system.query_range( + '{origin="CA"}', + TIMESTAMP '2025-01-01 00:00:00', + TIMESTAMP '2025-01-02 00:00:00' + ) + ) +; diff --git a/docs/src/main/sphinx/connector/snowflake.md b/docs/src/main/sphinx/connector/snowflake.md index 8d82faa722ce..d59f20d7466a 100644 --- a/docs/src/main/sphinx/connector/snowflake.md +++ b/docs/src/main/sphinx/connector/snowflake.md @@ -33,15 +33,13 @@ snowflake.role=role snowflake.warehouse=warehouse ``` -### Arrow serialization support +The Snowflake connector uses Apache Arrow as the serialization format when +reading from Snowflake. Add the following required, additional JVM argument +to the [](jvm-config): -This is an experimental feature which introduces support for using Apache Arrow -as the serialization format when reading from Snowflake. Please note there are -a few caveats: - -- Using Apache Arrow serialization is disabled by default. In order to enable - it, add `--add-opens=java.base/java.nio=ALL-UNNAMED` to the Trino - {ref}`jvm-config`. +```none +--add-opens=java.base/java.nio=ALL-UNNAMED +``` ### Multiple Snowflake databases or accounts diff --git a/docs/src/main/sphinx/installation/deployment.md b/docs/src/main/sphinx/installation/deployment.md index b1d100428ef3..b1d2cb9d85ae 100644 --- a/docs/src/main/sphinx/installation/deployment.md +++ b/docs/src/main/sphinx/installation/deployment.md @@ -241,24 +241,9 @@ properties for topics such as {doc}`/admin/properties-general`, {doc}`/admin/properties-query-management`, {doc}`/admin/properties-web-interface`, and others. 
-(log-levels)= -### Log levels - -The optional log levels file, `etc/log.properties`, allows setting the -minimum log level for named logger hierarchies. Every logger has a name, -which is typically the fully qualified name of the class that uses the logger. -Loggers have a hierarchy based on the dots in the name, like Java packages. -For example, consider the following log levels file: - -```text -io.trino=INFO -``` - -This would set the minimum level to `INFO` for both -`io.trino.server` and `io.trino.plugin.hive`. -The default minimum level is `INFO`, -thus the above example does not actually change anything. -There are four levels: `DEBUG`, `INFO`, `WARN` and `ERROR`. +Further configuration can include [](/admin/logging), [](/admin/opentelemetry), +[](/admin/jmx), [](/admin/openmetrics), and other functionality described in the +[](/admin) section. (catalog-properties)= ### Catalog properties diff --git a/docs/src/main/sphinx/object-storage.md b/docs/src/main/sphinx/object-storage.md index 7d883c064e9b..390f59e87d7e 100644 --- a/docs/src/main/sphinx/object-storage.md +++ b/docs/src/main/sphinx/object-storage.md @@ -61,7 +61,7 @@ compatible replacements: * [](/object-storage/file-system-s3) * [](/object-storage/file-system-alluxio) -The native support is available in all four connectors, but must be activated +The native support is available in all four connectors, and must be activated for use. (file-system-legacy)= @@ -73,14 +73,15 @@ System (HDFS): - [](/object-storage/file-system-hdfs) -All four connectors can use the related `hive.*` properties for access to other -object storage system as *legacy* support. Additional documentation is available -with the Hive connector and relevant dedicated pages: +All four connectors can use the deprecated `hive.*` properties for access to +other object storage systems as *legacy* support. These properties will be +removed in a future release. 
Additional documentation is available with the Hive +connector and relevant migration guides: - [](/connector/hive) -- [](/object-storage/legacy-azure) -- [](/object-storage/legacy-gcs) -- [](/object-storage/legacy-s3) +- [Azure Storage migration from hive.azure.* properties](fs-legacy-azure-migration) +- [Google Cloud Storage migration from hive.gcs.* properties](fs-legacy-gcs-migration) +- [S3 migration from hive.s3.* properties](fs-legacy-s3-migration) (object-storage-other)= ## Other object storage support @@ -101,9 +102,6 @@ storage: /object-storage/file-system-gcs /object-storage/file-system-s3 /object-storage/file-system-hdfs -/object-storage/legacy-azure -/object-storage/legacy-gcs -/object-storage/legacy-s3 /object-storage/file-system-cache /object-storage/file-system-alluxio /object-storage/metastores diff --git a/docs/src/main/sphinx/object-storage/file-system-azure.md b/docs/src/main/sphinx/object-storage/file-system-azure.md index e0b46cb8b674..e3a5e914d8d5 100644 --- a/docs/src/main/sphinx/object-storage/file-system-azure.md +++ b/docs/src/main/sphinx/object-storage/file-system-azure.md @@ -1,11 +1,12 @@ # Azure Storage file system support -Trino includes a native implementation to access [Azure -Storage](https://learn.microsoft.com/en-us/azure/storage/) with a catalog using -the Delta Lake, Hive, Hudi, or Iceberg connectors. +Trino includes a native implementation to access [Azure Data Lake Storage +Gen2](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-overview#about-azure-data-lake-storage-gen2) +with a catalog using the Delta Lake, Hive, Hudi, or Iceberg connectors. Enable the native implementation with `fs.native-azure.enabled=true` in your -catalog properties file. +catalog properties file. Additionally, the Azure storage account must have +hierarchical namespace enabled. 
## General configuration @@ -116,3 +117,52 @@ storage accounts: use the **Client ID**, **Secret** and **Tenant ID** values from the application registration, to configure the catalog using properties from [](azure-oauth-authentication). + + +(fs-legacy-azure-migration)= +## Migration from legacy Azure Storage file system + +Trino includes legacy Azure Storage support to use with a catalog using the +Delta Lake, Hive, Hudi, or Iceberg connectors. Upgrading existing deployments to +the current native implementation is recommended. Legacy support is deprecated +and will be removed. + +To migrate a catalog to use the native file system implementation for Azure, +make the following edits to your catalog configuration: + +1. Add the `fs.native-azure.enabled=true` catalog configuration property. +2. Configure the `azure.auth-type` catalog configuration property. +3. Refer to the following table to rename your existing legacy catalog + configuration properties to the corresponding native configuration + properties. Supported configuration values are identical unless otherwise + noted. + + :::{list-table} + :widths: 35, 35, 65 + :header-rows: 1 + * - Legacy property + - Native property + - Notes + * - `hive.azure.abfs-access-key` + - `azure.access-key` + - + * - `hive.azure.abfs.oauth.endpoint` + - `azure.oauth.endpoint` + - Also see `azure.oauth.tenant-id` in [](azure-oauth-authentication). + * - `hive.azure.abfs.oauth.client-id` + - `azure.oauth.client-id` + - + * - `hive.azure.abfs.oauth.secret` + - `azure.oauth.secret` + - + * - `hive.azure.abfs.oauth2.passthrough` + - `azure.use-oauth-passthrough-token` + - + ::: + +4. 
Remove the following legacy configuration properties if they exist in your + catalog configuration: + + * `hive.azure.abfs-storage-account` + * `hive.azure.wasb-access-key` + * `hive.azure.wasb-storage-account` diff --git a/docs/src/main/sphinx/object-storage/file-system-gcs.md b/docs/src/main/sphinx/object-storage/file-system-gcs.md index dde5cfc041de..3bde8515b782 100644 --- a/docs/src/main/sphinx/object-storage/file-system-gcs.md +++ b/docs/src/main/sphinx/object-storage/file-system-gcs.md @@ -78,3 +78,34 @@ Cloud Storage: - Path to the JSON file on each node that contains your Google Cloud Platform service account key. Not to be set together with `gcs.json-key`. ::: + +(fs-legacy-gcs-migration)= +## Migration from legacy Google Cloud Storage file system + +Trino includes legacy Google Cloud Storage support to use with a catalog using +the Delta Lake, Hive, Hudi, or Iceberg connectors. Upgrading existing +deployments to the current native implementation is recommended. Legacy support +is deprecated and will be removed. + +To migrate a catalog to use the native file system implementation for Google +Cloud Storage, make the following edits to your catalog configuration: + +1. Add the `fs.native-gcs.enabled=true` catalog configuration property. +2. Refer to the following table to rename your existing legacy catalog + configuration properties to the corresponding native configuration + properties. Supported configuration values are identical unless otherwise + noted. 
+ + :::{list-table} + :widths: 35, 35, 65 + :header-rows: 1 + * - Legacy property + - Native property + - Notes + * - `hive.gcs.use-access-token` + - `gcs.use-access-token` + - + * - `hive.gcs.json-key-file-path` + - `gcs.json-key-file-path` + - Also see `gcs.json-key` in preceding sections + ::: diff --git a/docs/src/main/sphinx/object-storage/file-system-s3.md b/docs/src/main/sphinx/object-storage/file-system-s3.md index e18f188cdcc8..de85fbbe1c97 100644 --- a/docs/src/main/sphinx/object-storage/file-system-s3.md +++ b/docs/src/main/sphinx/object-storage/file-system-s3.md @@ -277,3 +277,130 @@ Example JSON configuration: are converted to a colon. Choose a value not used in any of your IAM ARNs. ::: + + +(fs-legacy-s3-migration)= +## Migration from legacy S3 file system + +Trino includes legacy Amazon S3 support to use with a catalog using the Delta +Lake, Hive, Hudi, or Iceberg connectors. Upgrading existing deployments to the +current native implementation is recommended. Legacy support is deprecated and +will be removed. + +To migrate a catalog to use the native file system implementation for S3, make +the following edits to your catalog configuration: + +1. Add the `fs.native-s3.enabled=true` catalog configuration property. +2. Refer to the following table to rename your existing legacy catalog + configuration properties to the corresponding native configuration + properties. Supported configuration values are identical unless otherwise + noted. + + :::{list-table} + :widths: 35, 35, 65 + :header-rows: 1 + * - Legacy property + - Native property + - Notes + * - `hive.s3.aws-access-key` + - `s3.aws-access-key` + - + * - `hive.s3.aws-secret-key` + - `s3.aws-secret-key` + - + * - `hive.s3.iam-role` + - `s3.iam-role` + - Also see `s3.role-session-name` in preceding sections + for more role configuration options. 
+ * - `hive.s3.external-id` + - `s3.external-id` + - + * - `hive.s3.endpoint` + - `s3.endpoint` + - Add the `https://` prefix to make the value a correct URL. + * - `hive.s3.region` + - `s3.region` + - + * - `hive.s3.sse.enabled` + - None + - `s3.sse.type` set to the default value of `NONE` is equivalent to + `hive.s3.sse.enabled=false`. + * - `hive.s3.sse.type` + - `s3.sse.type` + - + * - `hive.s3.sse.kms-key-id` + - `s3.sse.kms-key-id` + - + * - `hive.s3.upload-acl-type` + - `s3.canned-acl` + - See preceding sections for supported values. + * - `hive.s3.streaming.part-size` + - `s3.streaming.part-size` + - + * - `hive.s3.proxy.host`, `hive.s3.proxy.port` + - `s3.http-proxy` + - Specify the host and port in one URL, for example `localhost:8888`. + * - `hive.s3.proxy.protocol` + - `s3.http-proxy.secure` + - Set to `TRUE` to enable HTTPS. + * - `hive.s3.proxy.non-proxy-hosts` + - `s3.http-proxy.non-proxy-hosts` + - + * - `hive.s3.proxy.username` + - `s3.http-proxy.username` + - + * - `hive.s3.proxy.password` + - `s3.http-proxy.password` + - + * - `hive.s3.proxy.preemptive-basic-auth` + - `s3.http-proxy.preemptive-basic-auth` + - + * - `hive.s3.sts.endpoint` + - `s3.sts.endpoint` + - + * - `hive.s3.sts.region` + - `s3.sts.region` + - + * - `hive.s3.max-error-retries` + - `s3.max-error-retries` + - Also see `s3.retry-mode` in preceding sections for more retry behavior + configuration options. + * - `hive.s3.connect-timeout` + - `s3.connect-timeout` + - + * - `hive.s3.connect-ttl` + - `s3.connection-ttl` + - Also see `s3.connection-max-idle-time` in preceding section for more + connection keep-alive options. + * - `hive.s3.socket-timeout` + - `s3.socket-read-timeout` + - Also see `s3.tcp-keep-alive` in preceding sections for more socket + connection keep-alive options. + * - `hive.s3.max-connections` + - `s3.max-connections` + - + * - `hive.s3.path-style-access` + - `s3.path-style-access` + - + ::: + +1. 
Remove the following legacy configuration properties if they exist in your + catalog configuration: + + * `hive.s3.storage-class` + * `hive.s3.signer-type` + * `hive.s3.signer-class` + * `hive.s3.staging-directory` + * `hive.s3.pin-client-to-current-region` + * `hive.s3.ssl.enabled` + * `hive.s3.sse.enabled` + * `hive.s3.kms-key-id` + * `hive.s3.encryption-materials-provider` + * `hive.s3.streaming.enabled` + * `hive.s3.max-client-retries` + * `hive.s3.max-backoff-time` + * `hive.s3.max-retry-time` + * `hive.s3.multipart.min-file-size` + * `hive.s3.multipart.min-part-size` + * `hive.s3-file-system-type` + * `hive.s3.user-agent-prefix` diff --git a/docs/src/main/sphinx/object-storage/legacy-azure.md b/docs/src/main/sphinx/object-storage/legacy-azure.md deleted file mode 100644 index c44fd3257d60..000000000000 --- a/docs/src/main/sphinx/object-storage/legacy-azure.md +++ /dev/null @@ -1,253 +0,0 @@ -# Legacy Azure Storage support - -The {doc}`/connector/hive` can be configured to use [Azure Data Lake Storage -(Gen2)](https://azure.microsoft.com/products/storage/data-lake-storage/). Trino -supports Azure Blob File System (ABFS) to access data in ADLS Gen2. - -:::{warning} -Legacy support is not recommended and will be removed. Use -[](file-system-azure). -::: - -## Hive connector configuration for Azure Storage credentials - -To configure Trino to use the Azure Storage credentials, set the following -configuration properties in the catalog properties file. It is best to use this -type of configuration if the primary storage account is linked to the cluster. - -The specific configuration depends on the type of storage and uses the -properties from the following sections in the catalog properties file. - -For more complex use cases, such as configuring multiple secondary storage -accounts using Hadoop's `core-site.xml`, see the -{ref}`hive-azure-advanced-config` options. 
- -To use legacy support, the `fs.hadoop.enabled` property must be set to `true` in -your catalog configuration file. - -### ADLS Gen2 / ABFS storage - -To connect to ABFS storage, you may either use the storage account's access -key, or a service principal. Do not use both sets of properties at the -same time. - -:::{list-table} ABFS Access Key -:widths: 30, 70 -:header-rows: 1 - -* - Property name - - Description -* - `hive.azure.abfs-storage-account` - - The name of the ADLS Gen2 storage account -* - `hive.azure.abfs-access-key` - - The decrypted access key for the ADLS Gen2 storage account -::: - -:::{list-table} ABFS Service Principal OAuth -:widths: 30, 70 -:header-rows: 1 - -* - Property name - - Description -* - `hive.azure.abfs.oauth.endpoint` - - The service principal / application's OAuth 2.0 token endpoint (v1). -* - `hive.azure.abfs.oauth.client-id` - - The service principal's client/application ID. -* - `hive.azure.abfs.oauth.secret` - - A client secret for the service principal. -::: - -When using a service principal, it must have the Storage Blob Data Owner, -Contributor, or Reader role on the storage account you are using, depending on -which operations you would like to use. - - -(hive-azure-advanced-config)= -### Advanced configuration - -All of the configuration properties for the Azure storage driver are stored in -the Hadoop `core-site.xml` configuration file. When there are secondary -storage accounts involved, we recommend configuring Trino using a -`core-site.xml` containing the appropriate credentials for each account. - -The path to the file must be configured in the catalog properties file: - -```text -hive.config.resources= -``` - -One way to find your account key is to ask for the connection string for the -storage account. The `abfsexample.dfs.core.windows.net` account refers to the -storage account. 
The connection string contains the account key: - -```text -az storage account show-connection-string --name abfswales1 -{ - "connectionString": "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=abfsexample;AccountKey=examplekey..." -} -``` - -When you have the account access key, you can add it to your `core-site.xml` -or Java cryptography extension (JCEKS) file. Alternatively, you can have your -cluster management tool to set the option -`fs.azure.account.key.STORAGE-ACCOUNT` to the account key value: - -```text - - fs.azure.account.key.abfsexample.dfs.core.windows.net - examplekey... - -``` - -For more information, see [Hadoop Azure Support: ABFS](https://hadoop.apache.org/docs/stable/hadoop-azure/abfs.html). - -## Accessing Azure Storage data - -### URI scheme to reference data - -Consistent with other FileSystem implementations within Hadoop, the Azure -Standard Blob and Azure Data Lake Storage Gen2 (ABFS) drivers define their own -URI scheme so that resources (directories and files) may be distinctly -addressed. You can access both primary and secondary storage accounts linked to -the cluster with the same URI scheme. Following are example URIs for the -different systems. - -ABFS URI: - -```text -abfs[s]://@.dfs.core.windows.net/// -``` - -ADLS Gen1 URI: - -```text -adl://.azuredatalakestore.net// -``` - -Azure Standard Blob URI: - -```text -wasb[s]://@.blob.core.windows.net/// -``` - -### Querying Azure Storage - -You can query tables already configured in your Hive metastore used in your Hive -catalog. To access Azure Storage data that is not yet mapped in the Hive -metastore, you need to provide the schema of the data, the file format, and the -data location. 
- -For example, if you have ORC or Parquet files in an ABFS `file_system`, you -need to execute a query: - -``` --- select schema in which the table is to be defined, must already exist -USE hive.default; - --- create table -CREATE TABLE orders ( - orderkey BIGINT, - custkey BIGINT, - orderstatus VARCHAR(1), - totalprice DOUBLE, - orderdate DATE, - orderpriority VARCHAR(15), - clerk VARCHAR(15), - shippriority INTEGER, - comment VARCHAR(79) -) WITH ( - external_location = 'abfs[s]://@.dfs.core.windows.net///', - format = 'ORC' -- or 'PARQUET' -); -``` - -Now you can query the newly mapped table: - -``` -SELECT * FROM orders; -``` - -## Writing data - -### Prerequisites - -Before you attempt to write data to Azure Storage, make sure you have configured -everything necessary to read data from the storage. - -### Create a write schema - -If the Hive metastore contains schema(s) mapped to Azure storage filesystems, -you can use them to write data to Azure storage. - -If you don't want to use existing schemas, or there are no appropriate schemas -in the Hive metastore, you need to create a new one: - -``` -CREATE SCHEMA hive.abfs_export -WITH (location = 'abfs[s]://file_system@account_name.dfs.core.windows.net/'); -``` - -### Write data to Azure Storage - -Once you have a schema pointing to a location where you want to write the data, -you can issue a `CREATE TABLE AS` statement and select your desired file -format. The data will be written to one or more files within the -`abfs[s]://file_system@account_name.dfs.core.windows.net//my_table` -namespace. Example: - -``` -CREATE TABLE hive.abfs_export.orders_abfs -WITH (format = 'ORC') -AS SELECT * FROM tpch.sf1.orders; -``` - -(fs-legacy-azure-migration)= -## Migration to Azure Storage file system - -Trino includes a [native implementation to access Azure -Storage](/object-storage/file-system-azure) with a catalog using the Delta Lake, -Hive, Hudi, or Iceberg connectors. 
Upgrading existing deployments to the new -native implementation is recommended. Legacy support will be deprecated and -removed. - -To migrate a catalog to use the native file system implementation for Azure, -make the following edits to your catalog configuration: - -1. Add the `fs.native-azure.enabled=true` catalog configuration property. -2. Configure the `azure.auth-type` catalog configuration property. -3. Refer to the following table to rename your existing legacy catalog - configuration properties to the corresponding native configuration - properties. Supported configuration values are identical unless otherwise - noted. - - :::{list-table} - :widths: 35, 35, 65 - :header-rows: 1 - * - Legacy property - - Native property - - Notes - * - `hive.azure.abfs-access-key` - - `azure.access-key` - - - * - `hive.azure.abfs.oauth.endpoint` - - `azure.oauth.endpoint` - - Also see `azure.oauth.tenant-id` in [](azure-oauth-authentication). - * - `hive.azure.abfs.oauth.client-id` - - `azure.oauth.client-id` - - - * - `hive.azure.abfs.oauth.secret` - - `azure.oauth.secret` - - - * - `hive.azure.abfs.oauth2.passthrough` - - `azure.use-oauth-passthrough-token` - - - ::: - -4. Remove the following legacy configuration properties if they exist in your - catalog configuration: - - * `hive.azure.abfs-storage-account` - * `hive.azure.wasb-access-key` - * `hive.azure.wasb-storage-account` - -For more information, see the [](/object-storage/file-system-azure). \ No newline at end of file diff --git a/docs/src/main/sphinx/object-storage/legacy-gcs.md b/docs/src/main/sphinx/object-storage/legacy-gcs.md deleted file mode 100644 index de29544f9466..000000000000 --- a/docs/src/main/sphinx/object-storage/legacy-gcs.md +++ /dev/null @@ -1,120 +0,0 @@ -# Legacy Google Cloud Storage support - -Object storage connectors can access -[Google Cloud Storage](https://cloud.google.com/storage/) data using the -`gs://` URI prefix. 
- -:::{warning} -Legacy support is not recommended and will be removed. Use [](file-system-gcs). -::: - -## Requirements - -To use Google Cloud Storage with non-anonymous access objects, you need: - -- A [Google Cloud service account](https://console.cloud.google.com/projectselector2/iam-admin/serviceaccounts) -- The key for the service account in JSON format - -(hive-google-cloud-storage-configuration)= -## Configuration - -To use legacy support, the `fs.hadoop.enabled` property must be set to `true` in -your catalog configuration file. - -The use of Google Cloud Storage as a storage location for an object storage -catalog requires setting a configuration property that defines the -[authentication method for any non-anonymous access object](https://cloud.google.com/storage/docs/authentication). Access methods cannot -be combined. - -The default root path used by the `gs:\\` prefix is set in the catalog by the -contents of the specified key file, or the key file used to create the OAuth -token. - -:::{list-table} Google Cloud Storage configuration properties -:widths: 35, 65 -:header-rows: 1 - -* - Property Name - - Description -* - `hive.gcs.json-key-file-path` - - JSON key file used to authenticate your Google Cloud service account with - Google Cloud Storage. -* - `hive.gcs.use-access-token` - - Use client-provided OAuth token to access Google Cloud Storage. 
-::: - -The following uses the Delta Lake connector in an example of a minimal -configuration file for an object storage catalog using a JSON key file: - -```properties -connector.name=delta_lake -hive.metastore.uri=thrift://example.net:9083 -hive.gcs.json-key-file-path=${ENV:GCP_CREDENTIALS_FILE_PATH} -``` - -## General usage - -Create a schema to use if one does not already exist, as in the following -example: - -```sql -CREATE SCHEMA storage_catalog.sales_data_in_gcs WITH (location = 'gs://example_location'); -``` - -Once you have created a schema, you can create tables in the schema, as in the -following example: - -```sql -CREATE TABLE storage_catalog.sales_data_in_gcs.orders ( - orderkey BIGINT, - custkey BIGINT, - orderstatus VARCHAR(1), - totalprice DOUBLE, - orderdate DATE, - orderpriority VARCHAR(15), - clerk VARCHAR(15), - shippriority INTEGER, - comment VARCHAR(79) -); -``` - -This statement creates the folder `gs://sales_data_in_gcs/orders` in the root -folder defined in the JSON key file. - -Your table is now ready to populate with data using `INSERT` statements. -Alternatively, you can use `CREATE TABLE AS` statements to create and -populate the table in a single statement. - -(fs-legacy-gcs-migration)= -## Migration to Google Cloud Storage file system - -Trino includes a [native implementation to access Google Cloud -Storage](/object-storage/file-system-gcs) with a catalog using the Delta Lake, -Hive, Hudi, or Iceberg connectors. Upgrading existing deployments to the new -native implementation is recommended. Legacy support will be deprecated and -removed. - -To migrate a catalog to use the native file system implementation for Google -Cloud Storage, make the following edits to your catalog configuration: - -1. Add the `fs.native-gcs.enabled=true` catalog configuration property. -2. Refer to the following table to rename your existing legacy catalog - configuration properties to the corresponding native configuration - properties. 
Supported configuration values are identical unless otherwise - noted. - - :::{list-table} - :widths: 35, 35, 65 - :header-rows: 1 - * - Legacy property - - Native property - - Notes - * - `hive.gcs.use-access-token` - - `gcs.use-access-token` - - - * - `hive.gcs.json-key-file-path` - - `gcs.json-key-file-path` - - Also see `gcs.json-key` in [](/object-storage/file-system-gcs) - ::: - -For more information, see the [](/object-storage/file-system-gcs). \ No newline at end of file diff --git a/docs/src/main/sphinx/object-storage/legacy-s3.md b/docs/src/main/sphinx/object-storage/legacy-s3.md deleted file mode 100644 index 84f88b201145..000000000000 --- a/docs/src/main/sphinx/object-storage/legacy-s3.md +++ /dev/null @@ -1,465 +0,0 @@ -# Legacy S3 support - -The {doc}`/connector/hive` can read and write tables that are stored in -[Amazon S3](https://aws.amazon.com/s3/) or S3-compatible systems. -This is accomplished by having a table or database location that -uses an S3 prefix, rather than an HDFS prefix. - -Trino uses its own S3 filesystem for the URI prefixes -`s3://`, `s3n://` and `s3a://`. - -:::{warning} -Legacy support is not recommended and will be removed. Use [](file-system-s3). -::: - -To use legacy support, the `fs.hadoop.enabled` property must be set to `true` in -your catalog configuration file. - -(hive-s3-configuration)= -## S3 configuration properties - -:::{list-table} -:widths: 35, 65 -:header-rows: 1 - -* - Property name - - Description -* - `hive.s3.aws-access-key` - - Default AWS access key to use. -* - `hive.s3.aws-secret-key` - - Default AWS secret key to use. -* - `hive.s3.iam-role` - - IAM role to assume. -* - `hive.s3.external-id` - - External ID for the IAM role trust policy. -* - `hive.s3.endpoint` - - The S3 storage endpoint server. This can be used to connect to an - S3-compatible storage system instead of AWS. 
When using v4 signatures, it is - recommended to set this to the AWS region-specific endpoint (e.g., - `http[s]://s3..amazonaws.com`). -* - `hive.s3.region` - - Optional property to force the S3 client to connect to the specified region - only. -* - `hive.s3.storage-class` - - The S3 storage class to use when writing the data. Currently only `STANDARD` - and `INTELLIGENT_TIERING` storage classes are supported. Default storage - class is `STANDARD` -* - `hive.s3.signer-type` - - Specify a different signer type for S3-compatible storage. Example: - `S3SignerType` for v2 signer type -* - `hive.s3.signer-class` - - Specify a different signer class for S3-compatible storage. -* - `hive.s3.path-style-access` - - Use path-style access for all requests to the S3-compatible storage. This is - for S3-compatible storage that doesn't support virtual-hosted-style access, - defaults to `false`. -* - `hive.s3.staging-directory` - - Local staging directory for data written to S3. This defaults to the Java - temporary directory specified by the JVM system property `java.io.tmpdir`. -* - `hive.s3.pin-client-to-current-region` - - Pin S3 requests to the same region as the EC2 instance where Trino is - running, defaults to `false`. -* - `hive.s3.ssl.enabled` - - Use HTTPS to communicate with the S3 API, defaults to `true`. -* - `hive.s3.sse.enabled` - - Use S3 server-side encryption, defaults to `false`. -* - `hive.s3.sse.type` - - The type of key management for S3 server-side encryption. Use `S3` for S3 - managed or `KMS` for KMS-managed keys, defaults to `S3`. -* - `hive.s3.sse.kms-key-id` - - The KMS Key ID to use for S3 server-side encryption with KMS-managed keys. - If not set, the default key is used. -* - `hive.s3.kms-key-id` - - If set, use S3 client-side encryption and use the AWS KMS to store - encryption keys and use the value of this property as the KMS Key ID for - newly created objects. 
-* - `hive.s3.encryption-materials-provider` - - If set, use S3 client-side encryption and use the value of this property as - the fully qualified name of a Java class which implements the AWS SDK's - `EncryptionMaterialsProvider` interface. If the class also implements - `Configurable` from the Hadoop API, the Hadoop configuration will be passed - in after the object has been created. -* - `hive.s3.upload-acl-type` - - Canned ACL to use while uploading files to S3, defaults to `PRIVATE`. If the - files are to be uploaded to an S3 bucket owned by a different AWS user, the - canned ACL has to be set to one of the following: `AUTHENTICATED_READ`, - `AWS_EXEC_READ`, `BUCKET_OWNER_FULL_CONTROL`, `BUCKET_OWNER_READ`, - `LOG_DELIVERY_WRITE`, `PUBLIC_READ`, `PUBLIC_READ_WRITE`. Refer to the `AWS - canned ACL - `_ - guide to understand each option's definition. -* - `hive.s3.skip-glacier-objects` - - Ignore Glacier objects rather than failing the query. This skips data that - may be expected to be part of the table or partition. Defaults to `false`. -* - `hive.s3.streaming.enabled` - - Use S3 multipart upload API to upload file in streaming way, without staging - file to be created in the local file system. Defaults to `true`. -* - `hive.s3.streaming.part-size` - - The part size for S3 streaming upload. Defaults to `16MB`. -* - `hive.s3.proxy.host` - - Proxy host to use if connecting through a proxy. -* - `hive.s3.proxy.port` - - Proxy port to use if connecting through a proxy. -* - `hive.s3.proxy.protocol` - - Proxy protocol. HTTP or HTTPS , defaults to `HTTPS`. -* - `hive.s3.proxy.non-proxy-hosts` - - Hosts list to access without going through the proxy. -* - `hive.s3.proxy.username` - - Proxy user name to use if connecting through a proxy. -* - `hive.s3.proxy.password` - - Proxy password to use if connecting through a proxy. 
-* - `hive.s3.proxy.preemptive-basic-auth` - - Whether to attempt to authenticate preemptively against proxy when using - base authorization, defaults to `false`. -* - `hive.s3.sts.endpoint` - - Optional override for the sts endpoint given that IAM role based - authentication via sts is used. -* - `hive.s3.sts.region` - - Optional override for the sts region given that IAM role based - authentication via sts is used. -* - `hive.s3.storage-class-filter` - - Filter based on storage class of S3 object, defaults to `READ_ALL`. - -::: - -(hive-s3-credentials)= -## S3 credentials - -If you are running Trino on Amazon EC2, using EMR or another facility, -it is recommended that you use IAM Roles for EC2 to govern access to S3. -To enable this, your EC2 instances need to be assigned an IAM Role which -grants appropriate access to the data stored in the S3 bucket(s) you wish -to use. It is also possible to configure an IAM role with `hive.s3.iam-role` -that is used for accessing any S3 bucket. This is much cleaner than -setting AWS access and secret keys in the `hive.s3.aws-access-key` -and `hive.s3.aws-secret-key` settings, and also allows EC2 to automatically -rotate credentials on a regular basis without any additional work on your part. - -If you are running Trino on Amazon EKS, and authenticate using a Kubernetes -service account, you can set the -`trino.s3.use-web-identity-token-credentials-provider` to `true`, so Trino does -not try using different credential providers from the default credential -provider chain. The property must be set in the Hadoop configuration files -referenced by the `hive.config.resources` Hive connector property. - -## Custom S3 credentials provider - -You can configure a custom S3 credentials provider by setting the configuration -property `trino.s3.credentials-provider` to the fully qualified class name of -a custom AWS credentials provider implementation. 
The property must be set in -the Hadoop configuration files referenced by the `hive.config.resources` Hive -connector property. - -The class must implement the -[AWSCredentialsProvider](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html) -interface and provide a two-argument constructor that takes a -`java.net.URI` and a Hadoop `org.apache.hadoop.conf.Configuration` -as arguments. A custom credentials provider can be used to provide -temporary credentials from STS (using `STSSessionCredentialsProvider`), -IAM role-based credentials (using `STSAssumeRoleSessionCredentialsProvider`), -or credentials for a specific use case (e.g., bucket/user specific credentials). - -(hive-s3-security-mapping)= -## S3 security mapping - -Trino supports flexible security mapping for S3, allowing for separate -credentials or IAM roles for specific users or buckets/paths. The IAM role -for a specific query can be selected from a list of allowed roles by providing -it as an *extra credential*. - -Each security mapping entry may specify one or more match criteria. If multiple -criteria are specified, all criteria must match. Available match criteria: - -- `user`: Regular expression to match against username. Example: `alice|bob` -- `group`: Regular expression to match against any of the groups that the user - belongs to. Example: `finance|sales` -- `prefix`: S3 URL prefix. It can specify an entire bucket or a path within a - bucket. The URL must start with `s3://` but will also match `s3a` or `s3n`. - Example: `s3://bucket-name/abc/xyz/` - -The security mapping must provide one or more configuration settings: - -- `accessKey` and `secretKey`: AWS access key and secret key. This overrides - any globally configured credentials, such as access key or instance credentials. -- `iamRole`: IAM role to use if no user provided role is specified as an - extra credential. This overrides any globally configured IAM role. 
This role - is allowed to be specified as an extra credential, although specifying it - explicitly has no effect, as it would be used anyway. -- `roleSessionName`: Optional role session name to use with `iamRole`. This can only - be used when `iamRole` is specified. If `roleSessionName` includes the string - `${USER}`, then the `${USER}` portion of the string will be replaced with the - current session's username. If `roleSessionName` is not specified, it defaults - to `trino-session`. -- `allowedIamRoles`: IAM roles that are allowed to be specified as an extra - credential. This is useful because a particular AWS account may have permissions - to use many roles, but a specific user should only be allowed to use a subset - of those roles. -- `kmsKeyId`: ID of KMS-managed key to be used for client-side encryption. -- `allowedKmsKeyIds`: KMS-managed key IDs that are allowed to be specified as an extra - credential. If list cotains "\*", then any key can be specified via extra credential. - -* ``endpoint``: The S3 storage endpoint server. This optional property can be used -to override S3 endpoints on a per-bucket basis. - -* ``region``: The region S3 client should connect to. This optional property can be used -to override S3 regions on a per-bucket basis. - -The security mapping entries are processed in the order listed in the configuration -JSON. More specific mappings should thus be specified before less specific mappings. -For example, the mapping list might have URL prefix `s3://abc/xyz/` followed by -`s3://abc/` to allow different configuration for a specific path within a bucket -than for other paths within the bucket. You can set default configuration by not -including any match criteria for the last entry in the list. - -In addition to the rules above, the default mapping can contain the optional -`useClusterDefault` boolean property with the following behavior: - -- `false` - (is set by default) property is ignored. 
- -- `true` - This causes the default cluster role to be used as a fallback option. - It can not be used with the following configuration properties: - - - `accessKey` - - `secretKey` - - `iamRole` - - `allowedIamRoles` - -If no mapping entry matches and no default is configured, the access is denied. - -The configuration JSON can either be retrieved from a file or REST-endpoint specified via -`hive.s3.security-mapping.config-file`. - -Example JSON configuration: - -```json -{ - "mappings": [ - { - "prefix": "s3://bucket-name/abc/", - "iamRole": "arn:aws:iam::123456789101:role/test_path" - }, - { - "user": "bob|charlie", - "iamRole": "arn:aws:iam::123456789101:role/test_default", - "allowedIamRoles": [ - "arn:aws:iam::123456789101:role/test1", - "arn:aws:iam::123456789101:role/test2", - "arn:aws:iam::123456789101:role/test3" - ] - }, - { - "prefix": "s3://special-bucket/", - "accessKey": "AKIAxxxaccess", - "secretKey": "iXbXxxxsecret" - }, - { - "prefix": "s3://regional-bucket/", - "iamRole": "arn:aws:iam::123456789101:role/regional-user", - "endpoint": "https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1.vpce.amazonaws.com", - "region": "us-east-1" - }, - { - "prefix": "s3://encrypted-bucket/", - "kmsKeyId": "kmsKey_10" - }, - { - "user": "test.*", - "iamRole": "arn:aws:iam::123456789101:role/test_users" - }, - { - "group": "finance", - "iamRole": "arn:aws:iam::123456789101:role/finance_users" - }, - { - "iamRole": "arn:aws:iam::123456789101:role/default" - } - ] -} -``` - -| Property name | Description | -| ----------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `hive.s3.security-mapping.config-file` | The JSON configuration file or REST-endpoint URI containing security 
mappings. | -| `hive.s3.security-mapping.json-pointer` | A JSON pointer (RFC 6901) to mappings inside the JSON retrieved from the config file or REST-endpont. The whole document ("") by default. | -| `hive.s3.security-mapping.iam-role-credential-name` | The name of the *extra credential* used to provide the IAM role. | -| `hive.s3.security-mapping.kms-key-id-credential-name` | The name of the *extra credential* used to provide the KMS-managed key ID. | -| `hive.s3.security-mapping.refresh-period` | How often to refresh the security mapping configuration. | -| `hive.s3.security-mapping.colon-replacement` | The character or characters to be used in place of the colon (`:`) character when specifying an IAM role name as an extra credential. Any instances of this replacement value in the extra credential value will be converted to a colon. Choose a value that is not used in any of your IAM ARNs. | - -(hive-s3-tuning-configuration)= -## Tuning properties - -The following tuning properties affect the behavior of the client -used by the Trino S3 filesystem when communicating with S3. -Most of these parameters affect settings on the `ClientConfiguration` -object associated with the `AmazonS3Client`. - -| Property name | Description | Default | -| --------------------------------- | ------------------------------------------------------------------------------------------------- | -------------------------- | -| `hive.s3.max-error-retries` | Maximum number of error retries, set on the S3 client. | `10` | -| `hive.s3.max-client-retries` | Maximum number of read attempts to retry. | `5` | -| `hive.s3.max-backoff-time` | Use exponential backoff starting at 1 second up to this maximum value when communicating with S3. | `10 minutes` | -| `hive.s3.max-retry-time` | Maximum time to retry communicating with S3. | `10 minutes` | -| `hive.s3.connect-timeout` | TCP connect timeout. | `5 seconds` | -| `hive.s3.connect-ttl` | TCP connect TTL, which affects connection reusage. 
| Connections do not expire. | -| `hive.s3.socket-timeout` | TCP socket read timeout. | `5 seconds` | -| `hive.s3.max-connections` | Maximum number of simultaneous open connections to S3. | `500` | -| `hive.s3.multipart.min-file-size` | Minimum file size before multi-part upload to S3 is used. | `16 MB` | -| `hive.s3.multipart.min-part-size` | Minimum multi-part upload part size. | `5 MB` | - -(hive-s3-data-encryption)= -## S3 data encryption - -Trino supports reading and writing encrypted data in S3 using both -server-side encryption with S3 managed keys and client-side encryption using -either the Amazon KMS or a software plugin to manage AES encryption keys. - -With [S3 server-side encryption](http://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html), -called *SSE-S3* in the Amazon documentation, the S3 infrastructure takes care of all encryption and decryption -work. One exception is SSL to the client, assuming you have `hive.s3.ssl.enabled` set to `true`. -S3 also manages all the encryption keys for you. To enable this, set `hive.s3.sse.enabled` to `true`. - -With [S3 client-side encryption](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html), -S3 stores encrypted data and the encryption keys are managed outside of the S3 infrastructure. Data is encrypted -and decrypted by Trino instead of in the S3 infrastructure. In this case, encryption keys can be managed -either by using the AWS KMS, or your own key management system. To use the AWS KMS for key management, set -`hive.s3.kms-key-id` to the UUID of a KMS key. Your AWS credentials or EC2 IAM role will need to be -granted permission to use the given key as well. 
- -To use a custom encryption key management system, set `hive.s3.encryption-materials-provider` to the -fully qualified name of a class which implements the -[EncryptionMaterialsProvider](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/model/EncryptionMaterialsProvider.html) -interface from the AWS Java SDK. This class has to be accessible to the Hive Connector through the -classpath and must be able to communicate with your custom key management system. If this class also implements -the `org.apache.hadoop.conf.Configurable` interface from the Hadoop Java API, then the Hadoop configuration -is passed in after the object instance is created, and before it is asked to provision or retrieve any -encryption keys. - -(fs-legacy-s3-migration)= -## Migration to S3 file system - -Trino includes a [native implementation to access Amazon -S3](/object-storage/file-system-s3) with a catalog using the Delta Lake, Hive, -Hudi, or Iceberg connectors. Upgrading existing deployments to the new native -implementation is recommended. Legacy support will be deprecated and removed. - -To migrate a catalog to use the native file system implementation for S3, make -the following edits to your catalog configuration: - -1. Add the `fs.native-s3.enabled=true` catalog configuration property. -2. Refer to the following table to rename your existing legacy catalog - configuration properties to the corresponding native configuration - properties. Supported configuration values are identical unless otherwise - noted. - - :::{list-table} - :widths: 35, 35, 65 - :header-rows: 1 - * - Legacy property - - Native property - - Notes - * - `hive.s3.aws-access-key` - - `s3.aws-access-key` - - - * - `hive.s3.aws-secret-key` - - `s3.aws-secret-key` - - - * - `hive.s3.iam-role` - - `s3.iam-role` - - Also see `s3.role-session-name` in [](/object-storage/file-system-s3) - for more role configuration options. 
- * - `hive.s3.external-id` - - `s3.external-id` - - - * - `hive.s3.endpoint` - - `s3.endpoint` - - Add the `https://` prefix to make the value a correct URL. - * - `hive.s3.region` - - `s3.region` - - - * - `hive.s3.sse.enabled` - - None - - `s3.sse.type` set to the default value of `NONE` is equivalent to - `hive.s3.sse.enabled=false`. - * - `hive.s3.sse.type` - - `s3.sse.type` - - - * - `hive.s3.sse.kms-key-id` - - `s3.sse.kms-key-id` - - - * - `hive.s3.upload-acl-type` - - `s3.canned-acl` - - See [](/object-storage/file-system-s3) for supported values. - * - `hive.s3.streaming.part-size` - - `s3.streaming.part-size` - - - * - `hive.s3.proxy.host`, `hive.s3.proxy.port` - - `s3.http-proxy` - - Specify the host and port in one URL, for example `localhost:8888`. - * - `hive.s3.proxy.protocol` - - `s3.http-proxy.secure` - - Set to `TRUE` to enable HTTPS. - * - `hive.s3.proxy.non-proxy-hosts` - - `s3.http-proxy.non-proxy-hosts` - - - * - `hive.s3.proxy.username` - - `s3.http-proxy.username` - - - * - `hive.s3.proxy.password` - - `s3.http-proxy.password` - - - * - `hive.s3.proxy.preemptive-basic-auth` - - `s3.http-proxy.preemptive-basic-auth` - - - * - `hive.s3.sts.endpoint` - - `s3.sts.endpoint` - - - * - `hive.s3.sts.region` - - `s3.sts.region` - - - * - `hive.s3.max-error-retries` - - `s3.max-error-retries` - - Also see `s3.retry-mode` in [](/object-storage/file-system-s3) for more - retry behavior configuration options. - * - `hive.s3.connect-timeout` - - `s3.connect-timeout` - - - * - `hive.s3.connect-ttl` - - `s3.connection-ttl` - - Also see `s3.connection-max-idle-time` in - [](/object-storage/file-system-s3) for more connection keep-alive - options. - * - `hive.s3.socket-timeout` - - `s3.socket-read-timeout` - - Also see `s3.tcp-keep-alive` in [](/object-storage/file-system-s3) for - more socket connection keep-alive options. 
- * - `hive.s3.max-connections` - - `s3.max-connections` - - - * - `hive.s3.path-style-access` - - `s3.path-style-access` - - - ::: - -1. Remove the following legacy configuration properties if they exist in your - catalog configuration: - - * `hive.s3.storage-class` - * `hive.s3.signer-type` - * `hive.s3.signer-class` - * `hive.s3.staging-directory` - * `hive.s3.pin-client-to-current-region` - * `hive.s3.ssl.enabled` - * `hive.s3.sse.enabled` - * `hive.s3.kms-key-id` - * `hive.s3.encryption-materials-provider` - * `hive.s3.streaming.enabled` - * `hive.s3.max-client-retries` - * `hive.s3.max-backoff-time` - * `hive.s3.max-retry-time` - * `hive.s3.multipart.min-file-size` - * `hive.s3.multipart.min-part-size` - * `hive.s3-file-system-type` - * `hive.s3.user-agent-prefix` - -For more information, see the [](/object-storage/file-system-s3). diff --git a/docs/src/main/sphinx/optimizer/statistics.md b/docs/src/main/sphinx/optimizer/statistics.md index f3b026bca786..ea69d7743bf6 100644 --- a/docs/src/main/sphinx/optimizer/statistics.md +++ b/docs/src/main/sphinx/optimizer/statistics.md @@ -4,7 +4,9 @@ Trino supports statistics based optimizations for queries. For a query to take advantage of these optimizations, Trino must have statistical information for the tables in that query. -Table statistics are provided to the query planner by connectors. +Table statistics are estimates about the stored data. They are provided to the +query planner by connectors and enable performance improvements for query +processing. ## Available statistics @@ -27,6 +29,17 @@ being used and can also vary by table. For example, the Hive connector does not currently provide statistics on data size. Table statistics can be displayed via the Trino SQL interface using the -{doc}`/sql/show-stats` command. For the Hive connector, refer to the -{ref}`Hive connector ` documentation to learn how to update table -statistics. +[](/sql/show-stats) command. 
+ +Depending on the connector support, table statistics are updated by Trino when +executing [data management statements](sql-data-management) like `INSERT`, +`UPDATE`, or `DELETE`. For example, the [Delta Lake +connector](delta-lake-table-statistics), the [Hive connector](hive-analyze), and +the [Iceberg connector](iceberg-table-statistics) all support table statistics +management from Trino. + +You can also initialize statistics collection with the [](/sql/analyze) command. +This is needed when other systems manipulate the data without Trino, and +therefore statistics tracked by Trino are out of date. Other connectors rely on +the underlying data source to manage table statistics or do not support table +statistics use at all. diff --git a/docs/src/main/sphinx/redirects.txt b/docs/src/main/sphinx/redirects.txt index e22fee37f421..281dab4c5161 100644 --- a/docs/src/main/sphinx/redirects.txt +++ b/docs/src/main/sphinx/redirects.txt @@ -10,7 +10,10 @@ connector/atop.md connector/removed.md connector/localfile.md connector/removed.md connector/accumulo.md connector/removed.md connector/kinesis.md connector/removed.md +object-storage/legacy-azure.md object-storage/file-system-azure.md object-storage/legacy-cos.md object-storage/file-system-s3.md +object-storage/legacy-gcs.md object-storage/file-system-gcs.md +object-storage/legacy-s3.md object-storage/file-system-s3.md security/apache-ranger-access-control.md security/ranger-access-control.md routines.md udf.md routines/function.md udf/function.md diff --git a/docs/src/main/sphinx/release.md b/docs/src/main/sphinx/release.md index e79581c08d43..7059c8ccfbbd 100644 --- a/docs/src/main/sphinx/release.md +++ b/docs/src/main/sphinx/release.md @@ -6,6 +6,7 @@ ```{toctree} :maxdepth: 1 +release/release-470 release/release-469 ``` diff --git a/docs/src/main/sphinx/release/release-332.md b/docs/src/main/sphinx/release/release-332.md index 9377faa25904..cd38e091cc31 100644 --- a/docs/src/main/sphinx/release/release-332.md +++ 
b/docs/src/main/sphinx/release/release-332.md @@ -63,7 +63,7 @@ - Add support for Alluxio Catalog Service. ({issue}`2116`) - Remove unnecessary `hive.metastore.glue.use-instance-credentials` configuration property. ({issue}`3265`) - Remove unnecessary `hive.s3.use-instance-credentials` configuration property. ({issue}`3265`) -- Add flexible {ref}`hive-s3-security-mapping`, allowing for separate credentials +- Add flexible S3 security mapping, allowing for separate credentials or IAM roles for specific users or buckets/paths. ({issue}`3265`) - Add support for specifying an External ID for an IAM role trust policy using the `hive.metastore.glue.external-id` configuration property ({issue}`3144`) diff --git a/docs/src/main/sphinx/release/release-470.md b/docs/src/main/sphinx/release/release-470.md new file mode 100644 index 000000000000..65108e367e5d --- /dev/null +++ b/docs/src/main/sphinx/release/release-470.md @@ -0,0 +1,103 @@ +# Release 470 (5 Feb 2025) + +## General + +* Add [](/connector/duckdb). ({issue}`18031`) +* Add [](/connector/loki). ({issue}`23053`) +* Add support for the [](select-with-session) to set per-query session + properties with `SELECT` queries. ({issue}`24889`) +* Improve compatibility of fault-tolerant exchange storage with S3-compliant + object stores. ({issue}`24822`) +* Allow skipping directory schema validation to improve compatibility of + fault-tolerant exchange storage with HDFS-like file systems. This can be + configured with the `exchange.hdfs.skip-directory-scheme-validation` property. ({issue}`24627`) +* Export JMX metric for `blockedQueries`. ({issue}`24907`) +* {{breaking}} Remove support for the `optimize_hash_generation` session + property and the `optimizer.optimize-hash-generation` configuration option. + ({issue}`24792`) +* Fix failure when using upper-case variable names in SQL user-defined + functions. ({issue}`24460`) +* Prevent failures of the {func}`array_histogram` function when the input + contains null values. 
({issue}`24765`) + +## JDBC driver + +* {{breaking}} Raise minimum runtime requirement to Java 11. ({issue}`23639`) + +## CLI + +* {{breaking}} Raise minimum runtime requirement to Java 11. ({issue}`23639`) + +## Delta Lake connector + +* Prevent connection leakage when using the Azure Storage file system. ({issue}`24116`) +* Deprecate use of the legacy file system support for Azure Storage, Google + Cloud Storage, IBM Cloud Object Storage, S3 and S3-compatible object storage + systems. Use the migration guides for [Azure + Storage](fs-legacy-azure-migration), [Google Cloud + Storage](fs-legacy-gcs-migration), and [S3](fs-legacy-s3-migration) to assist + if you have not switched from legacy support. ({issue}`24878`) +* Fix potential table corruption when using the `vacuum` procedure. ({issue}`24872`) + +## Faker connector + +* [Derive constraints](faker-statistics) from source data when using `CREATE TABLE ... AS SELECT`. ({issue}`24585`) + +## Hive connector + +* Deprecate use of the legacy file system support for Azure Storage, Google + Cloud Storage, IBM Cloud Object Storage, S3 and S3-compatible object storage + systems. Use the migration guides for [Azure + Storage](fs-legacy-azure-migration), [Google Cloud + Storage](fs-legacy-gcs-migration), and [S3](fs-legacy-s3-migration) to assist + if you have not switched from legacy support. ({issue}`24878`) +* Prevent connection leakage when using the Azure Storage file system. ({issue}`24116`) +* Fix NullPointerException when listing tables on Glue. ({issue}`24834`) + +## Hudi connector + +* Deprecate use of the legacy file system support for Azure Storage, Google + Cloud Storage, IBM Cloud Object Storage, S3 and S3-compatible object storage + systems. Use the migration guides for [Azure + Storage](fs-legacy-azure-migration), [Google Cloud + Storage](fs-legacy-gcs-migration), and [S3](fs-legacy-s3-migration) to assist + if you have not switched from legacy support. 
({issue}`24878`) +* Prevent connection leakage when using the Azure Storage file system. ({issue}`24116`) + +## Iceberg connector + +* Add the [optimize_manifests](iceberg-optimize-manifests) table procedure. ({issue}`14821`) +* Allow configuration of the number of commit retries with the + `max_commit_retry` table property. ({issue}`22672`) +* Allow caching of table metadata when using the Hive metastore. ({issue}`13115`) +* Deprecate use of the legacy file system support for Azure Storage, Google + Cloud Storage, IBM Cloud Object Storage, S3 and S3-compatible object storage + systems. Use the migration guides for [Azure + Storage](fs-legacy-azure-migration), [Google Cloud + Storage](fs-legacy-gcs-migration), and [S3](fs-legacy-s3-migration) to assist + if you have not switched from legacy support. ({issue}`24878`) +* Prevent connection leakage when using the Azure Storage file system. ({issue}`24116`) +* Fix failure when adding a new column with a name containing a dot. ({issue}`24813`) +* Fix failure when reading from tables with [equality + deletes](https://iceberg.apache.org/spec/#equality-delete-files) with nested + fields. ({issue}`18625`) +* Fix failure when reading `$entries` and `$all_entries` tables using [equality + deletes](https://iceberg.apache.org/spec/#equality-delete-files). ({issue}`24775`) + +## JMX connector + +* Prevent missing metrics values when MBeans in coordinator and workers do not + match. ({issue}`24908`) + +## Kinesis connector + +* {{breaking}} Remove the Kinesis connector. ({issue}`23923`) + +## MySQL connector + +* Add support for `MERGE` statement. ({issue}`24428`) +* Prevent writing of invalid, negative date values. ({issue}`24809`) + +## PostgreSQL connector + +* Raise minimum required version to PostgreSQL 12. 
  ({issue}`24836`) diff --git a/docs/src/main/sphinx/security/ldap.md b/docs/src/main/sphinx/security/ldap.md index 1ade3a221826..8acb524629a7 100644 --- a/docs/src/main/sphinx/security/ldap.md +++ b/docs/src/main/sphinx/security/ldap.md @@ -284,7 +284,7 @@ Verify the password for a keystore file and view its contents using ### Debug Trino to LDAP server issues If you need to debug issues with Trino communicating with the LDAP server, -you can change the {ref}`log level ` for the LDAP authenticator: +you can change the [log level](logging-configuration) for the LDAP authenticator: ```none io.trino.plugin.password=DEBUG diff --git a/docs/src/main/sphinx/security/oauth2.md b/docs/src/main/sphinx/security/oauth2.md index ca9feae7c1f0..932017d49de4 100644 --- a/docs/src/main/sphinx/security/oauth2.md +++ b/docs/src/main/sphinx/security/oauth2.md @@ -243,7 +243,7 @@ The following configuration properties are available: (trino-oauth2-troubleshooting)= ## Troubleshooting -To debug issues, change the {ref}`log level ` for the OAuth 2.0 +To debug issues, change the [log level](logging-configuration) for the OAuth 2.0 authenticator: ```none diff --git a/docs/src/main/sphinx/sql/select.md b/docs/src/main/sphinx/sql/select.md index 59d96267e0f0..cd32437fc4f1 100644 --- a/docs/src/main/sphinx/sql/select.md +++ b/docs/src/main/sphinx/sql/select.md @@ -3,7 +3,8 @@ ## Synopsis ```text -[ WITH FUNCTION udf ] +[ WITH SESSION [ name = expression [, ...] ] ] +[ WITH [ FUNCTION udf ] [, ...] ] [ WITH [ RECURSIVE ] with_query [, ...] ] SELECT [ ALL | DISTINCT ] select_expression [, ...] [ FROM from_item [, ...] ] @@ -68,6 +69,31 @@ ROLLUP ( column [, ...] ) Retrieve rows from zero or more tables. +(select-with-session)= +## WITH SESSION clause + +The `WITH SESSION` clause allows you to [set session and catalog session +property values](/sql/set-session) applicable for the processing of the current +SELECT statement only. The defined values override any other configuration and +session property settings.
Multiple properties are separated by commas. + +The following example overrides the global configuration property +`query.max-execution-time` with the session property `query_max_execution_time` +to reduce the time to `2h`. It also overrides the catalog property +`iceberg.query-partition-filter-required` from the `example` catalog using +[](/connector/iceberg) setting the catalog session property +`query_partition_filter_required` to `true`: + +```sql +WITH + SESSION + query_max_execution_time='2h', + example.query_partition_filter_required=true +SELECT * +FROM example.default.thetable +LIMIT 100; +``` + ## WITH FUNCTION clause The `WITH FUNCTION` clause allows you to define a list of [](udf-inline) that @@ -1038,6 +1064,7 @@ ORDER BY regionkey FETCH FIRST ROW WITH TIES; (5 rows) ``` +(tablesample)= ## TABLESAMPLE There are multiple sample methods: diff --git a/docs/src/main/sphinx/static/img/loki.png b/docs/src/main/sphinx/static/img/loki.png new file mode 100644 index 000000000000..37692fd1093e Binary files /dev/null and b/docs/src/main/sphinx/static/img/loki.png differ diff --git a/lib/trino-array/pom.xml b/lib/trino-array/pom.xml index e64cf23e160f..6a6b71f20f9a 100644 --- a/lib/trino-array/pom.xml +++ b/lib/trino-array/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-cache/pom.xml b/lib/trino-cache/pom.xml index cfdf92734f56..91271bae90f9 100644 --- a/lib/trino-cache/pom.xml +++ b/lib/trino-cache/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-alluxio/pom.xml b/lib/trino-filesystem-alluxio/pom.xml index d71100d95eea..9e63b53d16f5 100644 --- a/lib/trino-filesystem-alluxio/pom.xml +++ b/lib/trino-filesystem-alluxio/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-azure/pom.xml b/lib/trino-filesystem-azure/pom.xml index 7cda7e60d1e6..c61241370487 100644 --- 
a/lib/trino-filesystem-azure/pom.xml +++ b/lib/trino-filesystem-azure/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-cache-alluxio/pom.xml b/lib/trino-filesystem-cache-alluxio/pom.xml index 4ec3684e3093..1d5ef6f498a1 100644 --- a/lib/trino-filesystem-cache-alluxio/pom.xml +++ b/lib/trino-filesystem-cache-alluxio/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-gcs/pom.xml b/lib/trino-filesystem-gcs/pom.xml index a489a77bded7..90ee01392ebc 100644 --- a/lib/trino-filesystem-gcs/pom.xml +++ b/lib/trino-filesystem-gcs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-manager/pom.xml b/lib/trino-filesystem-manager/pom.xml index 929ce72dcea0..a14ef7444c13 100644 --- a/lib/trino-filesystem-manager/pom.xml +++ b/lib/trino-filesystem-manager/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-s3/pom.xml b/lib/trino-filesystem-s3/pom.xml index f94068965bb0..c2de646bac97 100644 --- a/lib/trino-filesystem-s3/pom.xml +++ b/lib/trino-filesystem-s3/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-s3/src/main/java/io/trino/filesystem/s3/S3FileSystem.java b/lib/trino-filesystem-s3/src/main/java/io/trino/filesystem/s3/S3FileSystem.java index c4ac486b61db..e7462b28aeeb 100644 --- a/lib/trino-filesystem-s3/src/main/java/io/trino/filesystem/s3/S3FileSystem.java +++ b/lib/trino-filesystem-s3/src/main/java/io/trino/filesystem/s3/S3FileSystem.java @@ -24,6 +24,8 @@ import io.trino.filesystem.TrinoOutputFile; import io.trino.filesystem.UriLocation; import io.trino.filesystem.encryption.EncryptionKey; +import software.amazon.awssdk.auth.signer.AwsS3V4Signer; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; 
import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.CommonPrefix; @@ -196,6 +198,7 @@ private void deleteObjects(Collection locations) .overrideConfiguration(context::applyCredentialProviderOverride) .requestPayer(requestPayer) .bucket(bucket) + .overrideConfiguration(disableStrongIntegrityChecksums()) .delete(builder -> builder.objects(objects).quiet(true)) .build(); @@ -388,4 +391,14 @@ private static void validateS3Location(Location location) { new S3Location(location); } + + // TODO (https://github.com/trinodb/trino/issues/24955): + // remove me once all of the S3-compatible storage support strong integrity checks + @SuppressWarnings("deprecation") + static AwsRequestOverrideConfiguration disableStrongIntegrityChecksums() + { + return AwsRequestOverrideConfiguration.builder() + .signer(AwsS3V4Signer.create()) + .build(); + } } diff --git a/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/AbstractTestS3FileSystem.java b/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/AbstractTestS3FileSystem.java index ca72e134a5a5..d0d3161f2d1c 100644 --- a/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/AbstractTestS3FileSystem.java +++ b/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/AbstractTestS3FileSystem.java @@ -41,6 +41,7 @@ import static com.google.common.collect.Iterables.getOnlyElement; import static io.trino.filesystem.encryption.EncryptionKey.randomAes256; +import static io.trino.filesystem.s3.S3FileSystem.disableStrongIntegrityChecksums; import static io.trino.filesystem.s3.S3SseCUtils.encoded; import static io.trino.filesystem.s3.S3SseCUtils.md5Checksum; import static java.nio.charset.StandardCharsets.UTF_8; @@ -156,6 +157,7 @@ void testFileWithTrailingWhitespaceAgainstNativeClient() builder.sseCustomerKeyMD5(md5Checksum(randomEncryptionKey)); } }) + .overrideConfiguration(disableStrongIntegrityChecksums()) 
.build(); s3Client.putObject(putObjectRequest, RequestBody.fromBytes(contents.clone())); @@ -271,7 +273,9 @@ public TempDirectory(S3Client s3Client, String path) public void create() { - s3Client.putObject(request -> request.bucket(bucket()).key(path), RequestBody.empty()); + s3Client.putObject(request -> request + .overrideConfiguration(disableStrongIntegrityChecksums()) + .bucket(bucket()).key(path), RequestBody.empty()); } @Override diff --git a/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/TestS3FileSystemAwsS3.java b/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/TestS3FileSystemAwsS3.java index 5d578543aedb..fa611c1f436b 100644 --- a/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/TestS3FileSystemAwsS3.java +++ b/lib/trino-filesystem-s3/src/test/java/io/trino/filesystem/s3/TestS3FileSystemAwsS3.java @@ -29,6 +29,7 @@ import java.util.List; import static com.google.common.collect.Iterables.getOnlyElement; +import static io.trino.filesystem.s3.S3FileSystem.disableStrongIntegrityChecksums; import static io.trino.testing.SystemEnvironmentUtils.requireEnv; import static org.assertj.core.api.Assertions.assertThat; @@ -46,6 +47,7 @@ protected void initEnvironment() accessKey = requireEnv("AWS_ACCESS_KEY_ID"); secretKey = requireEnv("AWS_SECRET_ACCESS_KEY"); region = requireEnv("AWS_REGION"); + bucket = requireEnv("EMPTY_S3_BUCKET"); } @@ -86,6 +88,7 @@ void testS3FileIteratorFileEntryTags() .bucket(bucket()) .key(key) .storageClass(storageClass.toString()) + .overrideConfiguration(disableStrongIntegrityChecksums()) .build(); s3Client.putObject( putObjectRequestBuilder, diff --git a/lib/trino-filesystem/pom.xml b/lib/trino-filesystem/pom.xml index 6f2fd9ccab48..92c16486dfe4 100644 --- a/lib/trino-filesystem/pom.xml +++ b/lib/trino-filesystem/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-geospatial-toolkit/pom.xml b/lib/trino-geospatial-toolkit/pom.xml index 
5c1566b54170..0a3fa16f2e3b 100644 --- a/lib/trino-geospatial-toolkit/pom.xml +++ b/lib/trino-geospatial-toolkit/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-hdfs/pom.xml b/lib/trino-hdfs/pom.xml index a1485c727b7f..0c08cacad909 100644 --- a/lib/trino-hdfs/pom.xml +++ b/lib/trino-hdfs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-hdfs/src/main/java/io/trino/hdfs/azure/HiveAzureConfig.java b/lib/trino-hdfs/src/main/java/io/trino/hdfs/azure/HiveAzureConfig.java index 8b523711d3c6..f85e6edc28e0 100644 --- a/lib/trino-hdfs/src/main/java/io/trino/hdfs/azure/HiveAzureConfig.java +++ b/lib/trino-hdfs/src/main/java/io/trino/hdfs/azure/HiveAzureConfig.java @@ -33,12 +33,14 @@ public class HiveAzureConfig private String abfsOAuthClientId; private String abfsOAuthClientSecret; + @Deprecated(forRemoval = true, since = "470") public Optional getWasbStorageAccount() { return Optional.ofNullable(wasbStorageAccount); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.wasb-storage-account") public HiveAzureConfig setWasbStorageAccount(String wasbStorageAccount) { @@ -46,12 +48,14 @@ public HiveAzureConfig setWasbStorageAccount(String wasbStorageAccount) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getWasbAccessKey() { return Optional.ofNullable(wasbAccessKey); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.wasb-access-key") public HiveAzureConfig setWasbAccessKey(String wasbAccessKey) { @@ -59,12 +63,14 @@ public HiveAzureConfig setWasbAccessKey(String wasbAccessKey) return this; } + @Deprecated public Optional getAbfsStorageAccount() { return Optional.ofNullable(abfsStorageAccount); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.abfs-storage-account") public HiveAzureConfig 
setAbfsStorageAccount(String abfsStorageAccount) { @@ -72,12 +78,14 @@ public HiveAzureConfig setAbfsStorageAccount(String abfsStorageAccount) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getAbfsAccessKey() { return Optional.ofNullable(abfsAccessKey); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.abfs-access-key") public HiveAzureConfig setAbfsAccessKey(String abfsAccessKey) { @@ -86,6 +94,7 @@ public HiveAzureConfig setAbfsAccessKey(String abfsAccessKey) } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.adl-client-id") public HiveAzureConfig setAdlClientId(String adlClientId) { @@ -93,12 +102,14 @@ public HiveAzureConfig setAdlClientId(String adlClientId) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getAdlClientId() { return Optional.ofNullable(adlClientId); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.adl-credential") public HiveAzureConfig setAdlCredential(String adlCredential) { @@ -106,17 +117,20 @@ public HiveAzureConfig setAdlCredential(String adlCredential) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getAdlCredential() { return Optional.ofNullable(adlCredential); } + @Deprecated(forRemoval = true, since = "470") public Optional getAdlRefreshUrl() { return Optional.ofNullable(adlRefreshUrl); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.adl-refresh-url") public HiveAzureConfig setAdlRefreshUrl(String adlRefreshUrl) { @@ -124,6 +138,7 @@ public HiveAzureConfig setAdlRefreshUrl(String adlRefreshUrl) return this; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.adl-proxy-host") public HiveAzureConfig setAdlProxyHost(HostAndPort adlProxyHost) { @@ -131,12 +146,14 @@ public HiveAzureConfig setAdlProxyHost(HostAndPort adlProxyHost) return this; } + 
@Deprecated(forRemoval = true, since = "470") public Optional getAdlProxyHost() { return Optional.ofNullable(adlProxyHost); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.abfs.oauth.endpoint") public HiveAzureConfig setAbfsOAuthClientEndpoint(String endpoint) { @@ -144,12 +161,14 @@ public HiveAzureConfig setAbfsOAuthClientEndpoint(String endpoint) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getAbfsOAuthClientEndpoint() { return Optional.ofNullable(abfsOAuthClientEndpoint); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.abfs.oauth.client-id") public HiveAzureConfig setAbfsOAuthClientId(String id) { @@ -157,12 +176,14 @@ public HiveAzureConfig setAbfsOAuthClientId(String id) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getAbfsOAuthClientId() { return Optional.ofNullable(abfsOAuthClientId); } @ConfigSecuritySensitive + @Deprecated(forRemoval = true, since = "470") @Config("hive.azure.abfs.oauth.secret") public HiveAzureConfig setAbfsOAuthClientSecret(String secret) { @@ -170,6 +191,7 @@ public HiveAzureConfig setAbfsOAuthClientSecret(String secret) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getAbfsOAuthClientSecret() { return Optional.ofNullable(abfsOAuthClientSecret); diff --git a/lib/trino-hdfs/src/main/java/io/trino/hdfs/cos/HiveCosServiceConfig.java b/lib/trino-hdfs/src/main/java/io/trino/hdfs/cos/HiveCosServiceConfig.java index 393724b15817..dd0cb9b019d7 100644 --- a/lib/trino-hdfs/src/main/java/io/trino/hdfs/cos/HiveCosServiceConfig.java +++ b/lib/trino-hdfs/src/main/java/io/trino/hdfs/cos/HiveCosServiceConfig.java @@ -22,14 +22,14 @@ public class HiveCosServiceConfig { private File serviceConfig; - @Deprecated + @Deprecated(forRemoval = true, since = "470") @FileExists public File getServiceConfig() { return serviceConfig; } - @Deprecated + @Deprecated(forRemoval 
= true, since = "470") @Config("hive.cos.service-config") public HiveCosServiceConfig setServiceConfig(File serviceConfig) { diff --git a/lib/trino-hdfs/src/main/java/io/trino/hdfs/gcs/HiveGcsConfig.java b/lib/trino-hdfs/src/main/java/io/trino/hdfs/gcs/HiveGcsConfig.java index cda101a55f1c..2fadde7d3d40 100644 --- a/lib/trino-hdfs/src/main/java/io/trino/hdfs/gcs/HiveGcsConfig.java +++ b/lib/trino-hdfs/src/main/java/io/trino/hdfs/gcs/HiveGcsConfig.java @@ -27,11 +27,13 @@ public class HiveGcsConfig private String jsonKey; private String jsonKeyFilePath; + @Deprecated(forRemoval = true, since = "470") public boolean isUseGcsAccessToken() { return useGcsAccessToken; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.gcs.use-access-token") @ConfigDescription("Use client-provided OAuth token to access Google Cloud Storage") public HiveGcsConfig setUseGcsAccessToken(boolean useGcsAccessToken) @@ -40,12 +42,14 @@ public HiveGcsConfig setUseGcsAccessToken(boolean useGcsAccessToken) return this; } + @Deprecated(forRemoval = true, since = "470") @Nullable public String getJsonKey() { return jsonKey; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.gcs.json-key") @ConfigSecuritySensitive public HiveGcsConfig setJsonKey(String jsonKey) @@ -54,6 +58,7 @@ public HiveGcsConfig setJsonKey(String jsonKey) return this; } + @Deprecated(forRemoval = true, since = "470") @Nullable @FileExists public String getJsonKeyFilePath() @@ -61,6 +66,7 @@ public String getJsonKeyFilePath() return jsonKeyFilePath; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.gcs.json-key-file-path") @ConfigDescription("JSON key file used to access Google Cloud Storage") public HiveGcsConfig setJsonKeyFilePath(String jsonKeyFilePath) diff --git a/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3Config.java b/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3Config.java index 70d00c56030f..2694696e8fc0 100644 --- 
a/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3Config.java +++ b/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3Config.java @@ -83,11 +83,13 @@ public class HiveS3Config private String s3StsEndpoint; private String s3StsRegion; + @Deprecated(forRemoval = true, since = "470") public String getS3AwsAccessKey() { return s3AwsAccessKey; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.aws-access-key") public HiveS3Config setS3AwsAccessKey(String s3AwsAccessKey) { @@ -95,11 +97,13 @@ public HiveS3Config setS3AwsAccessKey(String s3AwsAccessKey) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3AwsSecretKey() { return s3AwsSecretKey; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.aws-secret-key") @ConfigSecuritySensitive public HiveS3Config setS3AwsSecretKey(String s3AwsSecretKey) @@ -108,11 +112,13 @@ public HiveS3Config setS3AwsSecretKey(String s3AwsSecretKey) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3Endpoint() { return s3Endpoint; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.endpoint") public HiveS3Config setS3Endpoint(String s3Endpoint) { @@ -120,11 +126,13 @@ public HiveS3Config setS3Endpoint(String s3Endpoint) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3Region() { return s3Region; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.region") public HiveS3Config setS3Region(String s3Region) { @@ -132,12 +140,14 @@ public HiveS3Config setS3Region(String s3Region) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull public TrinoS3StorageClass getS3StorageClass() { return s3StorageClass; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.storage-class") @ConfigDescription("AWS S3 storage class to use when writing the data") public HiveS3Config setS3StorageClass(TrinoS3StorageClass s3StorageClass) @@ -146,11 +156,13 @@ public HiveS3Config 
setS3StorageClass(TrinoS3StorageClass s3StorageClass) return this; } + @Deprecated(forRemoval = true, since = "470") public TrinoS3SignerType getS3SignerType() { return s3SignerType; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.signer-type") public HiveS3Config setS3SignerType(TrinoS3SignerType s3SignerType) { @@ -158,11 +170,13 @@ public HiveS3Config setS3SignerType(TrinoS3SignerType s3SignerType) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3SignerClass() { return s3SignerClass; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.signer-class") public HiveS3Config setS3SignerClass(String s3SignerClass) { @@ -170,11 +184,13 @@ public HiveS3Config setS3SignerClass(String s3SignerClass) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isS3PathStyleAccess() { return s3PathStyleAccess; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.path-style-access") @ConfigDescription("Use path-style access for all request to S3") public HiveS3Config setS3PathStyleAccess(boolean s3PathStyleAccess) @@ -183,11 +199,13 @@ public HiveS3Config setS3PathStyleAccess(boolean s3PathStyleAccess) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3IamRole() { return s3IamRole; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.iam-role") @ConfigDescription("ARN of an IAM role to assume when connecting to S3") public HiveS3Config setS3IamRole(String s3IamRole) @@ -196,11 +214,13 @@ public HiveS3Config setS3IamRole(String s3IamRole) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3ExternalId() { return s3ExternalId; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.external-id") @ConfigDescription("External ID for the IAM role trust policy when connecting to S3") public HiveS3Config setS3ExternalId(String s3ExternalId) @@ -209,11 +229,13 @@ public HiveS3Config 
setS3ExternalId(String s3ExternalId) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isS3SslEnabled() { return s3SslEnabled; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.ssl.enabled") public HiveS3Config setS3SslEnabled(boolean s3SslEnabled) { @@ -221,11 +243,13 @@ public HiveS3Config setS3SslEnabled(boolean s3SslEnabled) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3EncryptionMaterialsProvider() { return s3EncryptionMaterialsProvider; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.encryption-materials-provider") @ConfigDescription("Use a custom encryption materials provider for S3 data encryption") public HiveS3Config setS3EncryptionMaterialsProvider(String s3EncryptionMaterialsProvider) @@ -234,11 +258,13 @@ public HiveS3Config setS3EncryptionMaterialsProvider(String s3EncryptionMaterial return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3KmsKeyId() { return s3KmsKeyId; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.kms-key-id") @ConfigDescription("Use an AWS KMS key for S3 data encryption") public HiveS3Config setS3KmsKeyId(String s3KmsKeyId) @@ -247,11 +273,13 @@ public HiveS3Config setS3KmsKeyId(String s3KmsKeyId) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3SseKmsKeyId() { return s3SseKmsKeyId; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.sse.kms-key-id") @ConfigDescription("KMS Key ID to use for S3 server-side encryption with KMS-managed key") public HiveS3Config setS3SseKmsKeyId(String s3SseKmsKeyId) @@ -260,11 +288,13 @@ public HiveS3Config setS3SseKmsKeyId(String s3SseKmsKeyId) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isS3SseEnabled() { return s3SseEnabled; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.sse.enabled") @ConfigDescription("Enable S3 server side encryption") public 
HiveS3Config setS3SseEnabled(boolean s3SseEnabled) @@ -273,12 +303,14 @@ public HiveS3Config setS3SseEnabled(boolean s3SseEnabled) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull public TrinoS3SseType getS3SseType() { return s3SseType; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.sse.type") @ConfigDescription("Key management type for S3 server-side encryption (S3 or KMS)") public HiveS3Config setS3SseType(TrinoS3SseType s3SseType) @@ -287,12 +319,14 @@ public HiveS3Config setS3SseType(TrinoS3SseType s3SseType) return this; } + @Deprecated(forRemoval = true, since = "470") @Min(0) public int getS3MaxClientRetries() { return s3MaxClientRetries; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.max-client-retries") public HiveS3Config setS3MaxClientRetries(int s3MaxClientRetries) { @@ -300,12 +334,14 @@ public HiveS3Config setS3MaxClientRetries(int s3MaxClientRetries) return this; } + @Deprecated(forRemoval = true, since = "470") @Min(0) public int getS3MaxErrorRetries() { return s3MaxErrorRetries; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.max-error-retries") public HiveS3Config setS3MaxErrorRetries(int s3MaxErrorRetries) { @@ -313,6 +349,7 @@ public HiveS3Config setS3MaxErrorRetries(int s3MaxErrorRetries) return this; } + @Deprecated(forRemoval = true, since = "470") @MinDuration("1s") @NotNull public Duration getS3MaxBackoffTime() @@ -320,6 +357,7 @@ public Duration getS3MaxBackoffTime() return s3MaxBackoffTime; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.max-backoff-time") public HiveS3Config setS3MaxBackoffTime(Duration s3MaxBackoffTime) { @@ -327,6 +365,7 @@ public HiveS3Config setS3MaxBackoffTime(Duration s3MaxBackoffTime) return this; } + @Deprecated(forRemoval = true, since = "470") @MinDuration("1ms") @NotNull public Duration getS3MaxRetryTime() @@ -334,6 +373,7 @@ public Duration getS3MaxRetryTime() return s3MaxRetryTime; } + @Deprecated(forRemoval 
= true, since = "470") @Config("hive.s3.max-retry-time") public HiveS3Config setS3MaxRetryTime(Duration s3MaxRetryTime) { @@ -341,6 +381,7 @@ public HiveS3Config setS3MaxRetryTime(Duration s3MaxRetryTime) return this; } + @Deprecated(forRemoval = true, since = "470") @MinDuration("1ms") @NotNull public Duration getS3ConnectTimeout() @@ -348,6 +389,7 @@ public Duration getS3ConnectTimeout() return s3ConnectTimeout; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.connect-timeout") public HiveS3Config setS3ConnectTimeout(Duration s3ConnectTimeout) { @@ -355,12 +397,14 @@ public HiveS3Config setS3ConnectTimeout(Duration s3ConnectTimeout) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull public Optional getS3ConnectTtl() { return s3ConnectTtl; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.connect-ttl") @ConfigDescription("TCP connect TTL in the client side, which affects connection reusage") public HiveS3Config setS3ConnectTtl(Duration s3ConnectTtl) @@ -369,6 +413,7 @@ public HiveS3Config setS3ConnectTtl(Duration s3ConnectTtl) return this; } + @Deprecated(forRemoval = true, since = "470") @MinDuration("1ms") @NotNull public Duration getS3SocketTimeout() @@ -376,6 +421,7 @@ public Duration getS3SocketTimeout() return s3SocketTimeout; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.socket-timeout") public HiveS3Config setS3SocketTimeout(Duration s3SocketTimeout) { @@ -383,12 +429,14 @@ public HiveS3Config setS3SocketTimeout(Duration s3SocketTimeout) return this; } + @Deprecated(forRemoval = true, since = "470") @Min(1) public int getS3MaxConnections() { return s3MaxConnections; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.max-connections") public HiveS3Config setS3MaxConnections(int s3MaxConnections) { @@ -396,6 +444,7 @@ public HiveS3Config setS3MaxConnections(int s3MaxConnections) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull @FileExists 
public File getS3StagingDirectory() @@ -403,6 +452,7 @@ public File getS3StagingDirectory() return s3StagingDirectory; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.staging-directory") @ConfigDescription("Temporary directory for staging files before uploading to S3") public HiveS3Config setS3StagingDirectory(File s3StagingDirectory) @@ -411,6 +461,7 @@ public HiveS3Config setS3StagingDirectory(File s3StagingDirectory) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull @MinDataSize("16MB") public DataSize getS3MultipartMinFileSize() @@ -418,6 +469,7 @@ public DataSize getS3MultipartMinFileSize() return s3MultipartMinFileSize; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.multipart.min-file-size") @ConfigDescription("Minimum file size for an S3 multipart upload") public HiveS3Config setS3MultipartMinFileSize(DataSize size) @@ -426,6 +478,7 @@ public HiveS3Config setS3MultipartMinFileSize(DataSize size) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull @MinDataSize("5MB") public DataSize getS3MultipartMinPartSize() @@ -433,6 +486,7 @@ public DataSize getS3MultipartMinPartSize() return s3MultipartMinPartSize; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.multipart.min-part-size") @ConfigDescription("Minimum part size for an S3 multipart upload") public HiveS3Config setS3MultipartMinPartSize(DataSize size) @@ -441,11 +495,13 @@ public HiveS3Config setS3MultipartMinPartSize(DataSize size) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isPinS3ClientToCurrentRegion() { return pinS3ClientToCurrentRegion; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.pin-client-to-current-region") @ConfigDescription("Should the S3 client be pinned to the current EC2 region") public HiveS3Config setPinS3ClientToCurrentRegion(boolean pinS3ClientToCurrentRegion) @@ -454,12 +510,14 @@ public HiveS3Config 
setPinS3ClientToCurrentRegion(boolean pinS3ClientToCurrentRe return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull public String getS3UserAgentPrefix() { return s3UserAgentPrefix; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.user-agent-prefix") @ConfigDescription("The user agent prefix to use for S3 calls") public HiveS3Config setS3UserAgentPrefix(String s3UserAgentPrefix) @@ -468,12 +526,14 @@ public HiveS3Config setS3UserAgentPrefix(String s3UserAgentPrefix) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull public TrinoS3AclType getS3AclType() { return s3AclType; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.upload-acl-type") @ConfigDescription("Canned ACL type for S3 uploads") public HiveS3Config setS3AclType(TrinoS3AclType s3AclType) @@ -482,11 +542,13 @@ public HiveS3Config setS3AclType(TrinoS3AclType s3AclType) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isSkipGlacierObjects() { return skipGlacierObjects; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.skip-glacier-objects") public HiveS3Config setSkipGlacierObjects(boolean skipGlacierObjects) { @@ -494,11 +556,13 @@ public HiveS3Config setSkipGlacierObjects(boolean skipGlacierObjects) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isRequesterPaysEnabled() { return requesterPaysEnabled; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.requester-pays.enabled") public HiveS3Config setRequesterPaysEnabled(boolean requesterPaysEnabled) { @@ -506,11 +570,13 @@ public HiveS3Config setRequesterPaysEnabled(boolean requesterPaysEnabled) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean isS3StreamingUploadEnabled() { return s3StreamingUploadEnabled; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.streaming.enabled") public HiveS3Config setS3StreamingUploadEnabled(boolean 
s3StreamingUploadEnabled) { @@ -518,6 +584,7 @@ public HiveS3Config setS3StreamingUploadEnabled(boolean s3StreamingUploadEnabled return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull @MinDataSize("5MB") @MaxDataSize("256MB") @@ -526,6 +593,7 @@ public DataSize getS3StreamingPartSize() return s3StreamingPartSize; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.streaming.part-size") @ConfigDescription("Part size for S3 streaming upload") public HiveS3Config setS3StreamingPartSize(DataSize s3StreamingPartSize) @@ -534,11 +602,13 @@ public HiveS3Config setS3StreamingPartSize(DataSize s3StreamingPartSize) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3ProxyHost() { return s3proxyHost; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.host") public HiveS3Config setS3ProxyHost(String s3proxyHost) { @@ -546,11 +616,13 @@ public HiveS3Config setS3ProxyHost(String s3proxyHost) return this; } + @Deprecated(forRemoval = true, since = "470") public int getS3ProxyPort() { return s3proxyPort; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.port") public HiveS3Config setS3ProxyPort(int s3proxyPort) { @@ -558,11 +630,13 @@ public HiveS3Config setS3ProxyPort(int s3proxyPort) return this; } + @Deprecated(forRemoval = true, since = "470") public TrinoS3Protocol getS3ProxyProtocol() { return s3ProxyProtocol; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.protocol") public HiveS3Config setS3ProxyProtocol(String s3ProxyProtocol) { @@ -570,11 +644,13 @@ public HiveS3Config setS3ProxyProtocol(String s3ProxyProtocol) return this; } + @Deprecated(forRemoval = true, since = "470") public List getS3NonProxyHosts() { return s3nonProxyHosts; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.non-proxy-hosts") public HiveS3Config setS3NonProxyHosts(List s3nonProxyHosts) { @@ -582,11 +658,13 @@ public HiveS3Config 
setS3NonProxyHosts(List s3nonProxyHosts) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3ProxyUsername() { return s3proxyUsername; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.username") public HiveS3Config setS3ProxyUsername(String s3proxyUsername) { @@ -594,11 +672,13 @@ public HiveS3Config setS3ProxyUsername(String s3proxyUsername) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3ProxyPassword() { return s3proxyPassword; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.password") @ConfigSecuritySensitive public HiveS3Config setS3ProxyPassword(String s3proxyPassword) @@ -607,11 +687,13 @@ public HiveS3Config setS3ProxyPassword(String s3proxyPassword) return this; } + @Deprecated(forRemoval = true, since = "470") public boolean getS3PreemptiveBasicProxyAuth() { return s3preemptiveBasicProxyAuth; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.proxy.preemptive-basic-auth") public HiveS3Config setS3PreemptiveBasicProxyAuth(boolean s3preemptiveBasicProxyAuth) { @@ -619,11 +701,13 @@ public HiveS3Config setS3PreemptiveBasicProxyAuth(boolean s3preemptiveBasicProxy return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3StsEndpoint() { return s3StsEndpoint; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.sts.endpoint") public HiveS3Config setS3StsEndpoint(String s3StsEndpoint) { @@ -631,11 +715,13 @@ public HiveS3Config setS3StsEndpoint(String s3StsEndpoint) return this; } + @Deprecated(forRemoval = true, since = "470") public String getS3StsRegion() { return s3StsRegion; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.sts.region") public HiveS3Config setS3StsRegion(String s3StsRegion) { diff --git a/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3TypeConfig.java b/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3TypeConfig.java index db43dde957d8..c15b9b14a4a7 
100644 --- a/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3TypeConfig.java +++ b/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/HiveS3TypeConfig.java @@ -20,12 +20,14 @@ public class HiveS3TypeConfig { private S3FileSystemType s3FileSystemType = S3FileSystemType.TRINO; + @Deprecated(forRemoval = true, since = "470") @NotNull public S3FileSystemType getS3FileSystemType() { return s3FileSystemType; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3-file-system-type") public HiveS3TypeConfig setS3FileSystemType(S3FileSystemType s3FileSystemType) { diff --git a/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/S3SecurityMappingConfig.java b/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/S3SecurityMappingConfig.java index 6ec715ff5140..98c7093e263a 100644 --- a/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/S3SecurityMappingConfig.java +++ b/lib/trino-hdfs/src/main/java/io/trino/hdfs/s3/S3SecurityMappingConfig.java @@ -29,11 +29,13 @@ public class S3SecurityMappingConfig private Duration refreshPeriod; private String colonReplacement; + @Deprecated(forRemoval = true, since = "470") public Optional getConfigFilePath() { return Optional.ofNullable(configFilePath); } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.security-mapping.config-file") @ConfigDescription("JSON configuration file containing security mappings") public S3SecurityMappingConfig setConfigFilePath(String configFilePath) @@ -42,12 +44,14 @@ public S3SecurityMappingConfig setConfigFilePath(String configFilePath) return this; } + @Deprecated(forRemoval = true, since = "470") @NotNull public String getJsonPointer() { return jsonPointer; } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.security-mapping.json-pointer") @ConfigDescription("JSON pointer (RFC 6901) to mappings inside JSON config") public S3SecurityMappingConfig setJsonPointer(String jsonPointer) @@ -56,11 +60,13 @@ public S3SecurityMappingConfig setJsonPointer(String jsonPointer) return this; } + 
@Deprecated(forRemoval = true, since = "470") public Optional getRoleCredentialName() { return Optional.ofNullable(roleCredentialName); } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.security-mapping.iam-role-credential-name") @ConfigDescription("Name of the extra credential used to provide IAM role") public S3SecurityMappingConfig setRoleCredentialName(String roleCredentialName) @@ -69,11 +75,13 @@ public S3SecurityMappingConfig setRoleCredentialName(String roleCredentialName) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getKmsKeyIdCredentialName() { return Optional.ofNullable(kmsKeyIdCredentialName); } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.security-mapping.kms-key-id-credential-name") @ConfigDescription("Name of the extra credential used to provide KMS Key ID") public S3SecurityMappingConfig setKmsKeyIdCredentialName(String kmsKeyIdCredentialName) @@ -82,11 +90,13 @@ public S3SecurityMappingConfig setKmsKeyIdCredentialName(String kmsKeyIdCredenti return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getRefreshPeriod() { return Optional.ofNullable(refreshPeriod); } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.security-mapping.refresh-period") @ConfigDescription("How often to refresh the security mapping configuration") public S3SecurityMappingConfig setRefreshPeriod(Duration refreshPeriod) @@ -95,11 +105,13 @@ public S3SecurityMappingConfig setRefreshPeriod(Duration refreshPeriod) return this; } + @Deprecated(forRemoval = true, since = "470") public Optional getColonReplacement() { return Optional.ofNullable(colonReplacement); } + @Deprecated(forRemoval = true, since = "470") @Config("hive.s3.security-mapping.colon-replacement") @ConfigDescription("Value used in place of colon for IAM role name in extra credentials") public S3SecurityMappingConfig setColonReplacement(String colonReplacement) diff --git a/lib/trino-hive-formats/pom.xml 
b/lib/trino-hive-formats/pom.xml index ee25039b66de..91ef864eecfe 100644 --- a/lib/trino-hive-formats/pom.xml +++ b/lib/trino-hive-formats/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-matching/pom.xml b/lib/trino-matching/pom.xml index 57c32d4b94b2..0fec4f6c7588 100644 --- a/lib/trino-matching/pom.xml +++ b/lib/trino-matching/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-memory-context/pom.xml b/lib/trino-memory-context/pom.xml index 88b972032dd5..c4c4cdd7a087 100644 --- a/lib/trino-memory-context/pom.xml +++ b/lib/trino-memory-context/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-metastore/pom.xml b/lib/trino-metastore/pom.xml index 54dbc123d8a5..a4911d46e000 100644 --- a/lib/trino-metastore/pom.xml +++ b/lib/trino-metastore/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-metastore/src/main/java/io/trino/metastore/HiveType.java b/lib/trino-metastore/src/main/java/io/trino/metastore/HiveType.java index 2f3bdb3a87bf..21247b772d94 100644 --- a/lib/trino-metastore/src/main/java/io/trino/metastore/HiveType.java +++ b/lib/trino-metastore/src/main/java/io/trino/metastore/HiveType.java @@ -34,6 +34,7 @@ import static io.trino.metastore.type.TypeConstants.TIMESTAMPLOCALTZ_TYPE_NAME; import static io.trino.metastore.type.TypeConstants.TIMESTAMP_TYPE_NAME; import static io.trino.metastore.type.TypeConstants.TINYINT_TYPE_NAME; +import static io.trino.metastore.type.TypeConstants.VARIANT_TYPE_NAME; import static io.trino.metastore.type.TypeInfoFactory.getPrimitiveTypeInfo; import static io.trino.metastore.type.TypeInfoUtils.getTypeInfoFromTypeString; import static io.trino.metastore.type.TypeInfoUtils.getTypeInfosFromTypeString; @@ -55,6 +56,7 @@ public final class HiveType public static final HiveType 
HIVE_TIMESTAMPLOCALTZ = new HiveType(getPrimitiveTypeInfo(TIMESTAMPLOCALTZ_TYPE_NAME)); public static final HiveType HIVE_DATE = new HiveType(getPrimitiveTypeInfo(DATE_TYPE_NAME)); public static final HiveType HIVE_BINARY = new HiveType(getPrimitiveTypeInfo(BINARY_TYPE_NAME)); + public static final HiveType HIVE_VARIANT = new HiveType(getPrimitiveTypeInfo(VARIANT_TYPE_NAME)); private final HiveTypeName hiveTypeName; private final TypeInfo typeInfo; diff --git a/lib/trino-metastore/src/main/java/io/trino/metastore/type/PrimitiveCategory.java b/lib/trino-metastore/src/main/java/io/trino/metastore/type/PrimitiveCategory.java index f0336cff7272..dac44e8c5592 100644 --- a/lib/trino-metastore/src/main/java/io/trino/metastore/type/PrimitiveCategory.java +++ b/lib/trino-metastore/src/main/java/io/trino/metastore/type/PrimitiveCategory.java @@ -18,5 +18,5 @@ public enum PrimitiveCategory { VOID, BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, DATE, TIMESTAMP, TIMESTAMPLOCALTZ, BINARY, DECIMAL, VARCHAR, CHAR, - INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME, UNKNOWN + INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME, VARIANT, UNKNOWN } diff --git a/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeConstants.java b/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeConstants.java index df41b965ce14..147e9f1f54a4 100644 --- a/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeConstants.java +++ b/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeConstants.java @@ -35,6 +35,7 @@ private TypeConstants() {} public static final String BINARY_TYPE_NAME = "binary"; public static final String INTERVAL_YEAR_MONTH_TYPE_NAME = "interval_year_month"; public static final String INTERVAL_DAY_TIME_TYPE_NAME = "interval_day_time"; + public static final String VARIANT_TYPE_NAME = "variant"; public static final String LIST_TYPE_NAME = "array"; public static final String MAP_TYPE_NAME = "map"; diff --git 
a/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeInfoUtils.java b/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeInfoUtils.java index dc1a94fc910a..bb351d433650 100644 --- a/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeInfoUtils.java +++ b/lib/trino-metastore/src/main/java/io/trino/metastore/type/TypeInfoUtils.java @@ -61,6 +61,7 @@ private TypeInfoUtils() {} registerType(new PrimitiveTypeEntry(PrimitiveCategory.INTERVAL_YEAR_MONTH, TypeConstants.INTERVAL_YEAR_MONTH_TYPE_NAME)); registerType(new PrimitiveTypeEntry(PrimitiveCategory.INTERVAL_DAY_TIME, TypeConstants.INTERVAL_DAY_TIME_TYPE_NAME)); registerType(new PrimitiveTypeEntry(PrimitiveCategory.DECIMAL, TypeConstants.DECIMAL_TYPE_NAME)); + registerType(new PrimitiveTypeEntry(PrimitiveCategory.VARIANT, TypeConstants.VARIANT_TYPE_NAME)); registerType(new PrimitiveTypeEntry(PrimitiveCategory.UNKNOWN, "unknown")); } diff --git a/lib/trino-orc/pom.xml b/lib/trino-orc/pom.xml index 57ed1562264f..2769f9bf3199 100644 --- a/lib/trino-orc/pom.xml +++ b/lib/trino-orc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-parquet/pom.xml b/lib/trino-parquet/pom.xml index 9cecbda33c17..67c2cc617efa 100644 --- a/lib/trino-parquet/pom.xml +++ b/lib/trino-parquet/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -13,6 +13,11 @@ Trino - Parquet file format support + + com.fasterxml.jackson.core + jackson-core + + com.google.errorprone error_prone_annotations diff --git a/lib/trino-parquet/src/main/java/io/trino/parquet/ParquetTypeUtils.java b/lib/trino-parquet/src/main/java/io/trino/parquet/ParquetTypeUtils.java index 34980fd6af82..85ce8a555875 100644 --- a/lib/trino-parquet/src/main/java/io/trino/parquet/ParquetTypeUtils.java +++ b/lib/trino-parquet/src/main/java/io/trino/parquet/ParquetTypeUtils.java @@ -39,8 +39,11 @@ import java.util.Map; import java.util.Optional; +import static 
com.google.common.base.Preconditions.checkArgument; import static com.google.common.collect.ImmutableMap.toImmutableMap; import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; +import static io.trino.spi.type.StandardTypes.JSON; +import static io.trino.spi.type.VarbinaryType.VARBINARY; import static java.lang.String.format; import static org.apache.parquet.schema.Type.Repetition.OPTIONAL; import static org.apache.parquet.schema.Type.Repetition.REPEATED; @@ -291,6 +294,15 @@ private static Optional constructField(Type type, ColumnIO columnIO, bool boolean required = columnIO.getType().getRepetition() != OPTIONAL; int repetitionLevel = columnIO.getRepetitionLevel(); int definitionLevel = columnIO.getDefinitionLevel(); + if (isVariantType(type, columnIO)) { + checkArgument(type.getTypeParameters().isEmpty(), "Expected type parameters to be empty for variant but got %s", type.getTypeParameters()); + if (!(columnIO instanceof GroupColumnIO groupColumnIo)) { + throw new IllegalStateException("Expected columnIO to be GroupColumnIO but got %s".formatted(columnIO.getClass().getSimpleName())); + } + Field valueField = constructField(VARBINARY, groupColumnIo.getChild(0), false).orElseThrow(); + Field metadataField = constructField(VARBINARY, groupColumnIo.getChild(1), false).orElseThrow(); + return Optional.of(new VariantField(type, repetitionLevel, definitionLevel, required, valueField, metadataField)); + } if (type instanceof RowType rowType) { GroupColumnIO groupColumnIO = (GroupColumnIO) columnIO; ImmutableList.Builder> fieldsBuilder = ImmutableList.builder(); @@ -350,4 +362,13 @@ private static Optional constructField(Type type, ColumnIO columnIO, bool } return Optional.of(new PrimitiveField(type, required, primitiveColumnIO.getColumnDescriptor(), primitiveColumnIO.getId())); } + + private static boolean isVariantType(Type type, ColumnIO columnIO) + { + return type.getTypeSignature().getBase().equals(JSON) && + columnIO instanceof GroupColumnIO groupColumnIo && + 
groupColumnIo.getChildrenCount() == 2 && + groupColumnIo.getChild("value") != null && + groupColumnIo.getChild("metadata") != null; + } } diff --git a/lib/trino-parquet/src/main/java/io/trino/parquet/VariantField.java b/lib/trino-parquet/src/main/java/io/trino/parquet/VariantField.java new file mode 100644 index 000000000000..f9b65baf4ca9 --- /dev/null +++ b/lib/trino-parquet/src/main/java/io/trino/parquet/VariantField.java @@ -0,0 +1,56 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.parquet; + +import io.trino.spi.type.Type; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static java.util.Objects.requireNonNull; + +public class VariantField + extends Field +{ + private final Field value; + private final Field metadata; + + public VariantField(Type type, int repetitionLevel, int definitionLevel, boolean required, Field value, Field metadata) + { + super(type, repetitionLevel, definitionLevel, required); + this.value = requireNonNull(value, "value is null"); + this.metadata = requireNonNull(metadata, "metadata is null"); + } + + public Field getValue() + { + return value; + } + + public Field getMetadata() + { + return metadata; + } + + @Override + public String toString() + { + return toStringHelper(this) + .add("type", getType()) + .add("repetitionLevel", getRepetitionLevel()) + .add("definitionLevel", getDefinitionLevel()) + .add("required", isRequired()) + .add("value", value) + .add("metadata", getMetadata()) + .toString(); + } +} diff --git a/lib/trino-parquet/src/main/java/io/trino/parquet/reader/ParquetReader.java b/lib/trino-parquet/src/main/java/io/trino/parquet/reader/ParquetReader.java index c77d013fdc35..5ba0976581bf 100644 --- a/lib/trino-parquet/src/main/java/io/trino/parquet/reader/ParquetReader.java +++ b/lib/trino-parquet/src/main/java/io/trino/parquet/reader/ParquetReader.java @@ -19,6 +19,7 @@ import com.google.common.collect.ListMultimap; import com.google.errorprone.annotations.FormatMethod; import io.airlift.log.Logger; +import io.airlift.slice.Slice; import io.trino.memory.context.AggregatedMemoryContext; import io.trino.parquet.ChunkKey; import io.trino.parquet.Column; @@ -30,14 +31,17 @@ import io.trino.parquet.ParquetReaderOptions; import io.trino.parquet.ParquetWriteValidation; import io.trino.parquet.PrimitiveField; +import io.trino.parquet.VariantField; import io.trino.parquet.metadata.ColumnChunkMetadata; import io.trino.parquet.metadata.PrunedBlockMetadata; import 
io.trino.parquet.predicate.TupleDomainParquetPredicate; import io.trino.parquet.reader.FilteredOffsetIndex.OffsetRange; +import io.trino.parquet.spark.Variant; import io.trino.plugin.base.metrics.LongCount; import io.trino.spi.Page; import io.trino.spi.block.ArrayBlock; import io.trino.spi.block.Block; +import io.trino.spi.block.BlockBuilder; import io.trino.spi.block.DictionaryBlock; import io.trino.spi.block.RowBlock; import io.trino.spi.block.RunLengthEncodedBlock; @@ -59,6 +63,7 @@ import java.io.Closeable; import java.io.IOException; +import java.time.ZoneId; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -69,6 +74,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.collect.ImmutableList.toImmutableList; import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static io.airlift.slice.Slices.utf8Slice; import static io.trino.parquet.ParquetValidationUtils.validateParquet; import static io.trino.parquet.ParquetWriteValidation.StatisticsValidation; import static io.trino.parquet.ParquetWriteValidation.StatisticsValidation.createStatisticsValidationBuilder; @@ -76,6 +82,8 @@ import static io.trino.parquet.ParquetWriteValidation.WriteChecksumBuilder.createWriteChecksumBuilder; import static io.trino.parquet.reader.ListColumnReader.calculateCollectionOffsets; import static io.trino.parquet.reader.PageReader.createPageReader; +import static io.trino.spi.type.VarbinaryType.VARBINARY; +import static io.trino.spi.type.VarcharType.VARCHAR; import static java.lang.Math.max; import static java.lang.Math.min; import static java.lang.Math.toIntExact; @@ -97,6 +105,7 @@ public class ParquetReader private final List columnFields; private final List primitiveFields; private final ParquetDataSource dataSource; + private final ZoneId zoneId; private final ColumnReaderFactory columnReaderFactory; private final AggregatedMemoryContext memoryContext; @@ -149,6 +158,7 @@ public 
ParquetReader( this.primitiveFields = getPrimitiveFields(columnFields.stream().map(Column::field).collect(toImmutableList())); this.rowGroups = requireNonNull(rowGroups, "rowGroups is null"); this.dataSource = requireNonNull(dataSource, "dataSource is null"); + this.zoneId = requireNonNull(timeZone, "timeZone is null").toTimeZone().toZoneId(); this.columnReaderFactory = new ColumnReaderFactory(timeZone, options); this.memoryContext = requireNonNull(memoryContext, "memoryContext is null"); this.currentRowGroupMemoryContext = memoryContext.newAggregatedMemoryContext(); @@ -332,6 +342,25 @@ private void freeCurrentRowGroupBuffers() } } + private ColumnChunk readVariant(VariantField field) + throws IOException + { + ColumnChunk valueChunk = readColumnChunk(field.getValue()); + + BlockBuilder variantBlock = VARCHAR.createBlockBuilder(null, 1); + if (valueChunk.getBlock().getPositionCount() == 0) { + variantBlock.appendNull(); + } + else { + ColumnChunk metadataChunk = readColumnChunk(field.getMetadata()); + Slice value = VARBINARY.getSlice(valueChunk.getBlock(), 0); + Slice metadata = VARBINARY.getSlice(metadataChunk.getBlock(), 0); + Variant variant = new Variant(value.byteArray(), metadata.byteArray()); + VARCHAR.writeSlice(variantBlock, utf8Slice(variant.toJson(zoneId))); + } + return new ColumnChunk(variantBlock.build(), valueChunk.getDefinitionLevels(), valueChunk.getRepetitionLevels()); + } + private ColumnChunk readArray(GroupField field) throws IOException { @@ -523,6 +552,10 @@ else if (field instanceof GroupField groupField) { .flatMap(Optional::stream) .forEach(child -> parseField(child, primitiveFields)); } + else if (field instanceof VariantField variantField) { + parseField(variantField.getValue(), primitiveFields); + parseField(variantField.getMetadata(), primitiveFields); + } } public Block readBlock(Field field) @@ -535,7 +568,10 @@ private ColumnChunk readColumnChunk(Field field) throws IOException { ColumnChunk columnChunk; - if (field.getType() 
instanceof RowType) { + if (field instanceof VariantField variantField) { + columnChunk = readVariant(variantField); + } + else if (field.getType() instanceof RowType) { columnChunk = readStruct((GroupField) field); } else if (field.getType() instanceof MapType) { diff --git a/lib/trino-parquet/src/main/java/io/trino/parquet/spark/Variant.java b/lib/trino-parquet/src/main/java/io/trino/parquet/spark/Variant.java new file mode 100644 index 000000000000..12b7d6a69817 --- /dev/null +++ b/lib/trino-parquet/src/main/java/io/trino/parquet/spark/Variant.java @@ -0,0 +1,172 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.parquet.spark; + +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.CharArrayWriter; +import java.io.IOException; +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.util.Base64; +import java.util.Locale; + +import static com.google.common.base.Preconditions.checkArgument; +import static io.trino.parquet.spark.VariantUtil.SIZE_LIMIT; +import static io.trino.parquet.spark.VariantUtil.VERSION; +import static io.trino.parquet.spark.VariantUtil.VERSION_MASK; +import static io.trino.parquet.spark.VariantUtil.getBinary; +import static io.trino.parquet.spark.VariantUtil.getBoolean; +import static io.trino.parquet.spark.VariantUtil.getDecimal; +import static io.trino.parquet.spark.VariantUtil.getDouble; +import static io.trino.parquet.spark.VariantUtil.getFloat; +import static io.trino.parquet.spark.VariantUtil.getLong; +import static io.trino.parquet.spark.VariantUtil.getMetadataKey; +import static io.trino.parquet.spark.VariantUtil.getString; +import static io.trino.parquet.spark.VariantUtil.getType; +import static io.trino.parquet.spark.VariantUtil.handleArray; +import static io.trino.parquet.spark.VariantUtil.handleObject; +import static io.trino.parquet.spark.VariantUtil.readUnsigned; +import static io.trino.plugin.base.util.JsonUtils.jsonFactory; +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; +import static java.time.format.DateTimeFormatter.ISO_LOCAL_TIME; +import static java.time.temporal.ChronoUnit.MICROS; + +/** + * Copied from https://github.com/apache/spark/blob/53d65fd12dd9231139188227ef9040d40d759021/common/variant/src/main/java/org/apache/spark/types/variant/Variant.java + * and adjusted the code style. 
+ */ +public final class Variant +{ + private static final DateTimeFormatter TIMESTAMP_NTZ_FORMATTER = new DateTimeFormatterBuilder() + .append(ISO_LOCAL_DATE) + .appendLiteral(' ') + .append(ISO_LOCAL_TIME) + .toFormatter(Locale.US); + + private static final DateTimeFormatter TIMESTAMP_FORMATTER = new DateTimeFormatterBuilder() + .append(TIMESTAMP_NTZ_FORMATTER) + .appendOffset("+HH:MM", "+00:00") + .toFormatter(Locale.US); + + private final byte[] value; + private final byte[] metadata; + // The variant value doesn't use the whole `value` binary, but starts from its `pos` index and + // spans a size of `valueSize(value, pos)`. This design avoids frequent copies of the value binary + // when reading a sub-variant in the array/object element. + private final int position; + + public Variant(byte[] value, byte[] metadata) + { + this(value, metadata, 0); + } + + private Variant(byte[] value, byte[] metadata, int position) + { + this.value = value; + this.metadata = metadata; + this.position = position; + checkArgument(metadata.length >= 1, "metadata must be present"); + checkArgument((metadata[0] & VERSION_MASK) == VERSION, "metadata version must be %s", VERSION); + // Don't attempt to use a Variant larger than 16 MiB. We'll never produce one, and it risks memory instability. + checkArgument(metadata.length <= SIZE_LIMIT, "max metadata size is %s: %s", SIZE_LIMIT, metadata.length); + checkArgument(value.length <= SIZE_LIMIT, "max value size is %s: %s", SIZE_LIMIT, value.length); + } + + // Stringify the variant in JSON format. 
+ public String toJson(ZoneId zoneId) + { + StringBuilder json = new StringBuilder(); + toJsonImpl(value, metadata, position, json, zoneId); + return json.toString(); + } + + private static void toJsonImpl(byte[] value, byte[] metadata, int position, StringBuilder json, ZoneId zoneId) + { + switch (getType(value, position)) { + case NULL -> json.append("null"); + case BOOLEAN -> json.append(getBoolean(value, position)); + case LONG -> json.append(getLong(value, position)); + case FLOAT -> json.append(getFloat(value, position)); + case DOUBLE -> json.append(getDouble(value, position)); + case DECIMAL -> json.append(getDecimal(value, position).toPlainString()); + case STRING -> json.append(escapeJson(getString(value, position))); + case BINARY -> appendQuoted(json, Base64.getEncoder().encodeToString(getBinary(value, position))); + case DATE -> appendQuoted(json, LocalDate.ofEpochDay(getLong(value, position)).toString()); + case TIMESTAMP -> appendQuoted(json, TIMESTAMP_FORMATTER.format(microsToInstant(getLong(value, position)).atZone(zoneId))); + case TIMESTAMP_NTZ -> appendQuoted(json, TIMESTAMP_NTZ_FORMATTER.format(microsToInstant(getLong(value, position)).atZone(ZoneOffset.UTC))); + case ARRAY -> handleArray(value, position, (size, offsetSize, offsetStart, dataStart) -> { + json.append('['); + for (int i = 0; i < size; ++i) { + int offset = readUnsigned(value, offsetStart + offsetSize * i, offsetSize); + int elementPos = dataStart + offset; + if (i != 0) { + json.append(','); + } + toJsonImpl(value, metadata, elementPos, json, zoneId); + } + json.append(']'); + return null; + }); + case OBJECT -> handleObject(value, position, (size, idSize, offsetSize, idStart, offsetStart, dataStart) -> { + json.append('{'); + for (int i = 0; i < size; ++i) { + int id = readUnsigned(value, idStart + idSize * i, idSize); + int offset = readUnsigned(value, offsetStart + offsetSize * i, offsetSize); + int elementPosition = dataStart + offset; + if (i != 0) { + json.append(','); + } 
+ json.append(escapeJson(getMetadataKey(metadata, id))); + json.append(':'); + toJsonImpl(value, metadata, elementPosition, json, zoneId); + } + json.append('}'); + return null; + }); + } + } + + private static Instant microsToInstant(long timestamp) + { + return Instant.EPOCH.plus(timestamp, MICROS); + } + + // A simplified and more performant version of `sb.append(escapeJson(value))`. It is used when we + // know `value` doesn't contain any special character that needs escaping. + private static void appendQuoted(StringBuilder json, String value) + { + json.append('"').append(value).append('"'); + } + + // Escape a string so that it can be pasted into JSON structure. + // For example, if `str` only contains a new-line character, then the result content is "\n" + // (4 characters). + private static String escapeJson(String value) + { + try (CharArrayWriter writer = new CharArrayWriter(); + JsonGenerator generator = jsonFactory().createGenerator(writer)) { + generator.writeString(value); + generator.flush(); + return writer.toString(); + } + catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/lib/trino-parquet/src/main/java/io/trino/parquet/spark/VariantUtil.java b/lib/trino-parquet/src/main/java/io/trino/parquet/spark/VariantUtil.java new file mode 100644 index 000000000000..c17c33f647f0 --- /dev/null +++ b/lib/trino-parquet/src/main/java/io/trino/parquet/spark/VariantUtil.java @@ -0,0 +1,480 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.parquet.spark; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.Arrays; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Copied from https://github.com/apache/spark/blob/53d65fd12dd9231139188227ef9040d40d759021/common/variant/src/main/java/org/apache/spark/types/variant/VariantUtil.java + * + * This class defines constants related to the variant format and provides functions for + * manipulating variant binaries. + + * A variant is made up of 2 binaries: value and metadata. A variant value consists of a one-byte + * header and a number of content bytes (can be zero). The header byte is divided into upper 6 bits + * (called "type info") and lower 2 bits (called "basic type"). The content format is explained in + * the below constants for all possible basic type and type info values. + + * The variant metadata includes a version id and a dictionary of distinct strings (case-sensitive). + * Its binary format is: + * - Version: 1-byte unsigned integer. The only acceptable value is 1 currently. + * - Dictionary size: 4-byte little-endian unsigned integer. The number of keys in the + * dictionary. + * - Offsets: (size + 1) * 4-byte little-endian unsigned integers. `offsets[i]` represents the + * starting position of string i, counting starting from the address of `offsets[0]`. Strings + * must be stored contiguously, so we don’t need to store the string size, instead, we compute it + * with `offset[i + 1] - offset[i]`. + * - UTF-8 string data. + */ +public final class VariantUtil +{ + public static final int BASIC_TYPE_BITS = 2; + public static final int BASIC_TYPE_MASK = 0x3; + public static final int TYPE_INFO_MASK = 0x3F; + + // Below is all possible basic type values. + // Primitive value. The type info value must be one of the values in the below section. 
+ public static final int PRIMITIVE = 0; + // Short string value. The type info value is the string size, which must be in `[0, + // kMaxShortStrSize]`. + // The string content bytes directly follow the header byte. + public static final int SHORT_STR = 1; + // Object value. The content contains a size, a list of field ids, a list of field offsets, and + // the actual field data. The length of the id list is `size`, while the length of the offset + // list is `size + 1`, where the last offset represent the total size of the field data. The + // fields in an object must be sorted by the field name in alphabetical order. Duplicate field + // names in one object are not allowed. + // We use 5 bits in the type info to specify the integer type of the object header: it should + // be 0_b4_b3b2_b1b0 (MSB is 0), where: + // - b4 specifies the type of size. When it is 0/1, `size` is a little-endian 1/4-byte + // unsigned integer. + // - b3b2/b1b0 specifies the integer type of id and offset. When the 2 bits are 0/1/2, the + // list contains 1/2/3-byte little-endian unsigned integers. + public static final int OBJECT = 2; + // Array value. The content contains a size, a list of field offsets, and the actual element + // data. It is similar to an object without the id list. The length of the offset list + // is `size + 1`, where the last offset represent the total size of the element data. + // Its type info should be: 000_b2_b1b0: + // - b2 specifies the type of size. + // - b1b0 specifies the integer type of offset. + public static final int ARRAY = 3; + + // Below is all possible type info values for `PRIMITIVE`. + // JSON Null value. Empty content. + public static final int NULL = 0; + // True value. Empty content. + public static final int TRUE = 1; + // False value. Empty content. + public static final int FALSE = 2; + // 1-byte little-endian signed integer. + public static final int INT1 = 3; + // 2-byte little-endian signed integer. 
+ public static final int INT2 = 4; + // 4-byte little-endian signed integer. + public static final int INT4 = 5; + // 4-byte little-endian signed integer. + public static final int INT8 = 6; + // 8-byte IEEE double. + public static final int DOUBLE = 7; + // 4-byte decimal. Content is 1-byte scale + 4-byte little-endian signed integer. + public static final int DECIMAL4 = 8; + // 8-byte decimal. Content is 1-byte scale + 8-byte little-endian signed integer. + public static final int DECIMAL8 = 9; + // 16-byte decimal. Content is 1-byte scale + 16-byte little-endian signed integer. + public static final int DECIMAL16 = 10; + // Date value. Content is 4-byte little-endian signed integer that represents the number of days + // from the Unix epoch. + public static final int DATE = 11; + // Timestamp value. Content is 8-byte little-endian signed integer that represents the number of + // microseconds elapsed since the Unix epoch, 1970-01-01 00:00:00 UTC. It is displayed to users in + // their local time zones and may be displayed differently depending on the execution environment. + public static final int TIMESTAMP = 12; + // Timestamp_ntz value. It has the same content as `TIMESTAMP` but should always be interpreted + // as if the local time zone is UTC. + public static final int TIMESTAMP_NTZ = 13; + // 4-byte IEEE float. + public static final int FLOAT = 14; + // Binary value. The content is (4-byte little-endian unsigned integer representing the binary + // size) + (size bytes of binary content). + public static final int BINARY = 15; + // Long string value. The content is (4-byte little-endian unsigned integer representing the + // string size) + (size bytes of string content). + public static final int LONG_STR = 16; + + public static final byte VERSION = 1; + // The lower 4 bits of the first metadata byte contain the version. 
+ public static final byte VERSION_MASK = 0x0F; + + public static final int U24_MAX = 0xFFFFFF; + public static final int U32_SIZE = 4; + + // Both variant value and variant metadata need to be no longer than 16MiB. + public static final int SIZE_LIMIT = U24_MAX + 1; + + public static final int MAX_DECIMAL4_PRECISION = 9; + public static final int MAX_DECIMAL8_PRECISION = 18; + public static final int MAX_DECIMAL16_PRECISION = 38; + + private VariantUtil() {} + + // Check the validity of an array index `position`. Throw `MALFORMED_VARIANT` if it is out of bound, + // meaning that the variant is malformed. + static void checkIndex(int position, int length) + { + if (position < 0 || position >= length) { + throw new IllegalArgumentException("Index out of bound: %s (length: %s)".formatted(position, length)); + } + } + + // Read a little-endian signed long value from `bytes[position, position + numBytes)`. + static long readLong(byte[] bytes, int position, int numBytes) + { + checkIndex(position, bytes.length); + checkIndex(position + numBytes - 1, bytes.length); + long result = 0; + // All bytes except the most significant byte should be unsign-extended and shifted (so we need + // `& 0xFF`). The most significant byte should be sign-extended and is handled after the loop. + for (int i = 0; i < numBytes - 1; ++i) { + long unsignedByteValue = bytes[position + i] & 0xFF; + result |= unsignedByteValue << (8 * i); + } + long signedByteValue = bytes[position + numBytes - 1]; + result |= signedByteValue << (8 * (numBytes - 1)); + return result; + } + + // Read a little-endian unsigned int value from `bytes[position, position + numBytes)`. The value must fit + // into a non-negative int (`[0, Integer.MAX_VALUE]`). + static int readUnsigned(byte[] bytes, int position, int numBytes) + { + checkIndex(position, bytes.length); + checkIndex(position + numBytes - 1, bytes.length); + int result = 0; + // Similar to the `readLong` loop, but all bytes should be unsign-extended. 
+ for (int i = 0; i < numBytes; ++i) { + int unsignedByteValue = bytes[position + i] & 0xFF; + result |= unsignedByteValue << (8 * i); + } + if (result < 0) { + throw new IllegalArgumentException("Value out of bound: %s".formatted(result)); + } + return result; + } + + // The value type of variant value. It is determined by the header byte but not a 1:1 mapping + // (for example, INT1/2/4/8 all maps to `Type.LONG`). + public enum Type + { + NULL, + BOOLEAN, + LONG, + FLOAT, + DOUBLE, + DECIMAL, + STRING, + BINARY, + DATE, + TIMESTAMP, + TIMESTAMP_NTZ, + ARRAY, + OBJECT, + } + + // Get the value type of variant value `value[position...]`. It is only legal to call `get*` if + // `getType` returns this type (for example, it is only legal to call `getLong` if `getType` + // returns `Type.Long`). + // Throw `MALFORMED_VARIANT` if the variant is malformed. + public static Type getType(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + return switch (basicType) { + case SHORT_STR -> Type.STRING; + case OBJECT -> Type.OBJECT; + case ARRAY -> Type.ARRAY; + default -> switch (typeInfo) { + case NULL -> Type.NULL; + case TRUE, FALSE -> Type.BOOLEAN; + case INT1, INT2, INT4, INT8 -> Type.LONG; + case DOUBLE -> Type.DOUBLE; + case DECIMAL4, DECIMAL8, DECIMAL16 -> Type.DECIMAL; + case DATE -> Type.DATE; + case TIMESTAMP -> Type.TIMESTAMP; + case TIMESTAMP_NTZ -> Type.TIMESTAMP_NTZ; + case FLOAT -> Type.FLOAT; + case BINARY -> Type.BINARY; + case LONG_STR -> Type.STRING; + default -> throw new IllegalArgumentException("Unexpected type: " + typeInfo); + }; + }; + } + + private static IllegalStateException unexpectedType(Type type) + { + return new IllegalStateException("Expect type to be " + type); + } + + // Get a boolean value from variant value `value[position...]`. + // Throw `MALFORMED_VARIANT` if the variant is malformed. 
+ public static boolean getBoolean(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != PRIMITIVE || (typeInfo != TRUE && typeInfo != FALSE)) { + throw unexpectedType(Type.BOOLEAN); + } + return typeInfo == TRUE; + } + + // Get a long value from variant value `value[position...]`. + // It is only legal to call it if `getType` returns one of `Type.LONG/DATE/TIMESTAMP/ + // TIMESTAMP_NTZ`. If the type is `DATE`, the return value is guaranteed to fit into an int and + // represents the number of days from the Unix epoch. If the type is `TIMESTAMP/TIMESTAMP_NTZ`, + // the return value represents the number of microseconds from the Unix epoch. + // Throw `MALFORMED_VARIANT` if the variant is malformed. + public static long getLong(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + String exceptionMessage = "Expect type to be LONG/DATE/TIMESTAMP/TIMESTAMP_NTZ"; + if (basicType != PRIMITIVE) { + throw new IllegalStateException(exceptionMessage); + } + return switch (typeInfo) { + case INT1 -> readLong(value, position + 1, 1); + case INT2 -> readLong(value, position + 1, 2); + case INT4, DATE -> readLong(value, position + 1, 4); + case INT8, TIMESTAMP, TIMESTAMP_NTZ -> readLong(value, position + 1, 8); + default -> throw new IllegalStateException(exceptionMessage); + }; + } + + // Get a double value from variant value `value[position...]`. + // Throw `MALFORMED_VARIANT` if the variant is malformed. 
+ public static double getDouble(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != PRIMITIVE || typeInfo != DOUBLE) { + throw unexpectedType(Type.DOUBLE); + } + return Double.longBitsToDouble(readLong(value, position + 1, 8)); + } + + // Check whether the precision and scale of the decimal are within the limit. + private static void checkDecimal(BigDecimal decimal, int maxPrecision) + { + if (decimal.precision() > maxPrecision || decimal.scale() > maxPrecision) { + throw new IllegalArgumentException("Decimal out of bound: " + decimal); + } + } + + // Get a decimal value from variant value `value[position...]`. + // Throw `MALFORMED_VARIANT` if the variant is malformed. + public static BigDecimal getDecimal(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != PRIMITIVE) { + throw unexpectedType(Type.DECIMAL); + } + // Interpret the scale byte as unsigned. If it is a negative byte, the unsigned value must be + // greater than `MAX_DECIMAL16_PRECISION` and will trigger an error in `checkDecimal`. + int scale = value[position + 1] & 0xFF; + BigDecimal result; + switch (typeInfo) { + case DECIMAL4: + result = BigDecimal.valueOf(readLong(value, position + 2, 4), scale); + checkDecimal(result, MAX_DECIMAL4_PRECISION); + break; + case DECIMAL8: + result = BigDecimal.valueOf(readLong(value, position + 2, 8), scale); + checkDecimal(result, MAX_DECIMAL8_PRECISION); + break; + case DECIMAL16: + checkIndex(position + 17, value.length); + byte[] bytes = new byte[16]; + // Copy the bytes reversely because the `BigInteger` constructor expects a big-endian + // representation. 
+ for (int i = 0; i < 16; ++i) { + bytes[i] = value[position + 17 - i]; + } + result = new BigDecimal(new BigInteger(bytes), scale); + checkDecimal(result, MAX_DECIMAL16_PRECISION); + break; + default: + throw unexpectedType(Type.DECIMAL); + } + return result.stripTrailingZeros(); + } + + // Get a float value from variant value `value[position...]`. + // Throw `MALFORMED_VARIANT` if the variant is malformed. + public static float getFloat(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != PRIMITIVE || typeInfo != FLOAT) { + throw unexpectedType(Type.FLOAT); + } + return Float.intBitsToFloat((int) readLong(value, position + 1, 4)); + } + + // Get a binary value from variant value `value[position...]`. + // Throw `MALFORMED_VARIANT` if the variant is malformed. + public static byte[] getBinary(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != PRIMITIVE || typeInfo != BINARY) { + throw unexpectedType(Type.BINARY); + } + int start = position + 1 + U32_SIZE; + int length = readUnsigned(value, position + 1, U32_SIZE); + checkIndex(start + length - 1, value.length); + return Arrays.copyOfRange(value, start, start + length); + } + + // Get a string value from variant value `value[position...]`. + // Throw `MALFORMED_VARIANT` if the variant is malformed. 
+ public static String getString(byte[] value, int position) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType == SHORT_STR || (basicType == PRIMITIVE && typeInfo == LONG_STR)) { + int start; + int length; + if (basicType == SHORT_STR) { + start = position + 1; + length = typeInfo; + } + else { + start = position + 1 + U32_SIZE; + length = readUnsigned(value, position + 1, U32_SIZE); + } + checkIndex(start + length - 1, value.length); + return new String(value, start, length, UTF_8); + } + throw unexpectedType(Type.STRING); + } + + public interface ObjectHandler + { + /** + * @param size Number of object fields. + * @param idSize The integer size of the field id list. + * @param offsetSize The integer size of the offset list. + * @param idStart The starting index of the field id list in the variant value array. + * @param offsetStart The starting index of the offset list in the variant value array. + * @param dataStart The starting index of field data in the variant value array. + */ + T apply(int size, int idSize, int offsetSize, int idStart, int offsetStart, int dataStart); + } + + // A helper function to access a variant object. It provides `handler` with its required + // parameters and returns what it returns. + public static T handleObject(byte[] value, int position, ObjectHandler handler) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != OBJECT) { + throw unexpectedType(Type.OBJECT); + } + // Refer to the comment of the `OBJECT` constant for the details of the object header encoding. + // Suppose `typeInfo` has a bit representation of 0_b4_b3b2_b1b0, the following line extracts + // b4 to determine whether the object uses a 1/4-byte size. 
+ boolean largeSize = ((typeInfo >> 4) & 0x1) != 0; + int sizeBytes = (largeSize ? U32_SIZE : 1); + int size = readUnsigned(value, position + 1, sizeBytes); + // Extracts b3b2 to determine the integer size of the field id list. + int idSize = ((typeInfo >> 2) & 0x3) + 1; + // Extracts b1b0 to determine the integer size of the offset list. + int offsetSize = (typeInfo & 0x3) + 1; + int idStart = position + 1 + sizeBytes; + int offsetStart = idStart + size * idSize; + int dataStart = offsetStart + (size + 1) * offsetSize; + return handler.apply(size, idSize, offsetSize, idStart, offsetStart, dataStart); + } + + public interface ArrayHandler + { + /** + * @param size Number of array elements. + * @param offsetSize The integer size of the offset list. + * @param offsetStart The starting index of the offset list in the variant value array. + * @param dataStart The starting index of element data in the variant value array. + */ + T apply(int size, int offsetSize, int offsetStart, int dataStart); + } + + // A helper function to access a variant array. + public static T handleArray(byte[] value, int position, ArrayHandler handler) + { + checkIndex(position, value.length); + int basicType = value[position] & BASIC_TYPE_MASK; + int typeInfo = (value[position] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK; + if (basicType != ARRAY) { + throw unexpectedType(Type.ARRAY); + } + // Refer to the comment of the `ARRAY` constant for the details of the object header encoding. + // Suppose `typeInfo` has a bit representation of 000_b2_b1b0, the following line extracts + // b2 to determine whether the object uses a 1/4-byte size. + boolean largeSize = ((typeInfo >> 2) & 0x1) != 0; + int sizeBytes = (largeSize ? U32_SIZE : 1); + int size = readUnsigned(value, position + 1, sizeBytes); + // Extracts b1b0 to determine the integer size of the offset list. 
+ int offsetSize = (typeInfo & 0x3) + 1; + int offsetStart = position + 1 + sizeBytes; + int dataStart = offsetStart + (size + 1) * offsetSize; + return handler.apply(size, offsetSize, offsetStart, dataStart); + } + + // Get a key at `id` in the variant metadata. + // Throw `MALFORMED_VARIANT` if the variant is malformed. An out-of-bound `id` is also considered + // a malformed variant because it is read from the corresponding variant value. + public static String getMetadataKey(byte[] metadata, int id) + { + checkIndex(0, metadata.length); + // Extracts the highest 2 bits in the metadata header to determine the integer size of the + // offset list. + int offsetSize = ((metadata[0] >> 6) & 0x3) + 1; + int dictSize = readUnsigned(metadata, 1, offsetSize); + if (id >= dictSize) { + throw new IllegalArgumentException("Index out of bound: %s (size: %s)".formatted(id, dictSize)); + } + // There are a header byte, a `dictSize` with `offsetSize` bytes, and `(dictSize + 1)` offsets + // before the string data. + int stringStart = 1 + (dictSize + 2) * offsetSize; + int offset = readUnsigned(metadata, 1 + (id + 1) * offsetSize, offsetSize); + int nextOffset = readUnsigned(metadata, 1 + (id + 2) * offsetSize, offsetSize); + if (offset > nextOffset) { + throw new IllegalArgumentException("Invalid offset: %s > %s".formatted(offset, nextOffset)); + } + checkIndex(stringStart + nextOffset - 1, metadata.length); + return new String(metadata, stringStart + offset, nextOffset - offset, UTF_8); + } +} diff --git a/lib/trino-parquet/src/test/java/io/trino/parquet/spark/TestVariant.java b/lib/trino-parquet/src/test/java/io/trino/parquet/spark/TestVariant.java new file mode 100644 index 000000000000..fb963f39fef2 --- /dev/null +++ b/lib/trino-parquet/src/test/java/io/trino/parquet/spark/TestVariant.java @@ -0,0 +1,87 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.parquet.spark; + +import org.intellij.lang.annotations.Language; +import org.junit.jupiter.api.Test; + +import java.time.ZoneOffset; + +import static java.time.ZoneOffset.UTC; +import static org.assertj.core.api.Assertions.assertThat; + +final class TestVariant +{ + @Test + void testVariantToJson() + { + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 12, 1}, new byte[] {1, 1, 0, 3, 99, 111, 108}, "{\"col\":1}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 12, 2}, new byte[] {1, 1, 0, 5, 97, 114, 114, 97, 121}, "{\"array\":2}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 12, 3}, new byte[] {1, 1, 0, 3, 109, 97, 112}, "{\"map\":3}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 12, 4}, new byte[] {1, 1, 0, 6, 115, 116, 114, 117, 99, 116}, "{\"struct\":4}"); + + assertVariantToJson(new byte[] {12, 1}, new byte[] {1, 0, 0}, "1"); + assertVariantToJson(new byte[] {12, 2}, new byte[] {1, 0, 0}, "2"); + assertVariantToJson(new byte[] {12, 3}, new byte[] {1, 0, 0}, "3"); + assertVariantToJson(new byte[] {12, 4}, new byte[] {1, 0, 0}, "4"); + } + + @Test + void testVariantToJsonAllTypes() + { + assertVariantToJson(new byte[] {4}, new byte[] {1, 0, 0}, "true"); + assertVariantToJson(new byte[] {12, 1}, new byte[] {1, 0, 0}, "1"); + assertVariantToJson(new byte[] {56, -51, -52, 76, 62}, new byte[] {1, 0, 0}, "0.2"); + assertVariantToJson(new byte[] {28, 51, 51, 51, 51, 51, 51, -45, 63}, new byte[] {1, 0, 0}, "0.3"); + assertVariantToJson(new byte[] {32, 1, 4, 0, 0, 0}, new byte[] {1, 0, 0}, "0.4"); + 
assertVariantToJson(new byte[] {37, 116, 101, 115, 116, 32, 100, 97, 116, 97}, new byte[] {1, 0, 0}, "\"test data\""); + assertVariantToJson(new byte[] {60, 3, 0, 0, 0, 101, 104, 63}, new byte[] {1, 0, 0}, "\"ZWg/\""); + assertVariantToJson(new byte[] {44, -27, 72, 0, 0}, new byte[] {1, 0, 0}, "\"2021-02-03\""); + assertVariantToJson(new byte[] {48, -88, -34, 22, -20, 19, -116, 3, 0}, new byte[] {1, 0, 0}, "\"2001-08-22 01:02:03.321+00:00\""); + assertVariantToJson(new byte[] {52, 64, -34, -104, 21, -22, -73, 5, 0}, new byte[] {1, 0, 0}, "\"2021-01-02 12:34:56.123456\""); + assertVariantToJson(new byte[] {3, 1, 0, 2, 12, 1}, new byte[] {1, 0, 0}, "[1]"); + assertVariantToJson(new byte[] {2, 2, 0, 1, 0, 2, 4, 12, 1, 12, 2}, new byte[] {1, 2, 0, 4, 8, 107, 101, 121, 49, 107, 101, 121, 50}, "{\"key1\":1,\"key2\":2}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 12, 1}, new byte[] {1, 1, 0, 1, 120}, "{\"x\":1}"); + } + + @Test + void testVariantToJsonTimestamp() + { + assertThat(new Variant(new byte[] {48, -88, -34, 22, -20, 19, -116, 3, 0}, new byte[] {1, 0, 0}).toJson(ZoneOffset.ofHours(-18))) + .isEqualTo("\"2001-08-21 07:02:03.321-18:00\""); + assertThat(new Variant(new byte[] {48, -88, -34, 22, -20, 19, -116, 3, 0}, new byte[] {1, 0, 0}).toJson(ZoneOffset.ofHours(-1))) + .isEqualTo("\"2001-08-22 00:02:03.321-01:00\""); + assertThat(new Variant(new byte[] {48, -88, -34, 22, -20, 19, -116, 3, 0}, new byte[] {1, 0, 0}).toJson(UTC)) + .isEqualTo("\"2001-08-22 01:02:03.321+00:00\""); + assertThat(new Variant(new byte[] {48, -88, -34, 22, -20, 19, -116, 3, 0}, new byte[] {1, 0, 0}).toJson(ZoneOffset.ofHours(1))) + .isEqualTo("\"2001-08-22 02:02:03.321+01:00\""); + assertThat(new Variant(new byte[] {48, -88, -34, 22, -20, 19, -116, 3, 0}, new byte[] {1, 0, 0}).toJson(ZoneOffset.ofHours(18))) + .isEqualTo("\"2001-08-22 19:02:03.321+18:00\""); + } + + @Test + void testVariantToJsonNullValue() + { + assertVariantToJson(new byte[] {2, 1, 0, 0, 1, 0}, new byte[] {1, 1, 
0, 3, 99, 111, 108}, "{\"col\":null}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 0}, new byte[] {1, 1, 0, 5, 97, 114, 114, 97, 121}, "{\"array\":null}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 0}, new byte[] {1, 1, 0, 3, 109, 97, 112}, "{\"map\":null}"); + assertVariantToJson(new byte[] {2, 1, 0, 0, 2, 0}, new byte[] {1, 1, 0, 6, 115, 116, 114, 117, 99, 116}, "{\"struct\":null}"); + } + + private static void assertVariantToJson(byte[] value, byte[] metadata, @Language("JSON") String expectedJson) + { + assertThat(new Variant(value, metadata).toJson(UTC)) + .isEqualTo(expectedJson); + } +} diff --git a/lib/trino-plugin-toolkit/pom.xml b/lib/trino-plugin-toolkit/pom.xml index eba6d953686d..1e6fa2cedcb6 100644 --- a/lib/trino-plugin-toolkit/pom.xml +++ b/lib/trino-plugin-toolkit/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/lib/trino-plugin-toolkit/src/main/java/io/trino/plugin/base/JdkCompatibilityChecks.java b/lib/trino-plugin-toolkit/src/main/java/io/trino/plugin/base/JdkCompatibilityChecks.java new file mode 100644 index 000000000000..3780a9e7d589 --- /dev/null +++ b/lib/trino-plugin-toolkit/src/main/java/io/trino/plugin/base/JdkCompatibilityChecks.java @@ -0,0 +1,78 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.base; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Multimap; +import com.google.inject.Binder; + +import java.lang.management.ManagementFactory; +import java.util.List; +import java.util.regex.Pattern; + +import static java.lang.String.format; +import static java.util.Objects.requireNonNull; + +public class JdkCompatibilityChecks +{ + private final String inputArguments; + + private static final JdkCompatibilityChecks INSTANCE = new JdkCompatibilityChecks( + ManagementFactory.getRuntimeMXBean().getInputArguments()); + + @VisibleForTesting + JdkCompatibilityChecks(List inputArguments) + { + this.inputArguments = Joiner.on(" ") + .skipNulls() + .join(requireNonNull(inputArguments, "inputArguments is null")); + } + + public static void verifyConnectorAccessOpened(Binder binder, String connectorName, Multimap modules) + { + INSTANCE.verifyAccessOpened(wrap(binder), format("Connector '%s'", connectorName), modules); + } + + @VisibleForTesting + void verifyAccessOpened(ThrowableSettable throwableSettable, String description, Multimap modules) + { + ImmutableList.Builder missingJvmArguments = ImmutableList.builder(); + for (String fromModule : modules.keySet()) { + for (String toModule : modules.get(fromModule)) { + String requiredJvmArgument = format(".*?--add-opens[\\s=]%s/%s=ALL-UNNAMED.*?", Pattern.quote(fromModule), Pattern.quote(toModule)); + if (!inputArguments.matches(requiredJvmArgument)) { + missingJvmArguments.add(format("--add-opens=%s/%s=ALL-UNNAMED", fromModule, toModule)); + } + } + } + + List requiredJvmArguments = missingJvmArguments.build(); + if (!requiredJvmArguments.isEmpty()) { + throwableSettable.setThrowable(new IllegalStateException(format("%s requires additional JVM argument(s). 
Please add the following to the JVM configuration: '%s'", description, String.join(" ", requiredJvmArguments)))); + } + } + + private static ThrowableSettable wrap(Binder binder) + { + return throwable -> binder.addError(throwable.getMessage()); + } + + @VisibleForTesting + interface ThrowableSettable + { + void setThrowable(Throwable throwable); + } +} diff --git a/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/TestJdkCompatibilityChecks.java b/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/TestJdkCompatibilityChecks.java new file mode 100644 index 000000000000..2442901a88aa --- /dev/null +++ b/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/TestJdkCompatibilityChecks.java @@ -0,0 +1,105 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.base; + +import com.google.common.collect.ImmutableMultimap; +import com.google.inject.Binder; +import com.google.inject.CreationException; +import com.google.inject.Guice; +import com.google.inject.Module; +import org.junit.jupiter.api.Test; + +import java.util.List; + +import static io.trino.plugin.base.JdkCompatibilityChecks.verifyConnectorAccessOpened; +import static java.util.Objects.requireNonNull; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class TestJdkCompatibilityChecks +{ + @Test + public void shouldThrowWhenAccessIsNotGranted() + { + ThrowableSettableMock errorSink = new ThrowableSettableMock(); + new JdkCompatibilityChecks(List.of()).verifyAccessOpened(errorSink, "Connector 'snowflake'", ImmutableMultimap.of( + "java.base", "java.nio")); + + assertThat(errorSink.getThrowable()) + .isInstanceOf(IllegalStateException.class) + .hasMessage("Connector 'snowflake' requires additional JVM argument(s). Please add the following to the JVM configuration: '--add-opens=java.base/java.nio=ALL-UNNAMED'"); + } + + @Test + public void shouldThrowWhenOneOfAccessGrantsIsMissing() + { + ThrowableSettableMock errorSink = new ThrowableSettableMock(); + new JdkCompatibilityChecks(List.of("--add-opens", "java.base/java.nio=ALL-UNNAMED")).verifyAccessOpened(errorSink, "Connector 'snowflake'", ImmutableMultimap.of( + "java.base", "java.nio", + "java.base", "java.lang")); + + assertThat(errorSink.getThrowable()) + .isInstanceOf(IllegalStateException.class) + .hasMessage("Connector 'snowflake' requires additional JVM argument(s). 
Please add the following to the JVM configuration: '--add-opens=java.base/java.lang=ALL-UNNAMED'"); + } + + @Test + public void shouldSetErrorOnBinder() + { + assertThatThrownBy(() -> Guice.createInjector(new TestingModule())) + .isInstanceOf(CreationException.class) + .hasMessageContaining("Unable to create injector, see the following errors:") + .hasMessageContaining("1) Connector 'Testing' requires additional JVM argument(s). Please add the following to the JVM configuration: '--add-opens=java.base/java.non.existing=ALL-UNNAMED --add-opens=java.base/java.this.should.fail=ALL-UNNAMED'"); + } + + @Test + public void shouldNotThrowWhenAllAccessGrantsArePresent() + { + ThrowableSettableMock errorSink = new ThrowableSettableMock(); + new JdkCompatibilityChecks(List.of("--add-opens=java.base/java.nio=ALL-UNNAMED", "--add-opens java.base/java.lang=ALL-UNNAMED")).verifyAccessOpened(errorSink, "Connector 'snowflake'", ImmutableMultimap.of( + "java.base", "java.nio", + "java.base", "java.lang")); + + assertThat(errorSink.getThrowable()).isNull(); + } + + private static class TestingModule + implements Module + { + @Override + public void configure(Binder binder) + { + verifyConnectorAccessOpened(binder, "Testing", ImmutableMultimap.of( + "java.base", "java.non.existing", + "java.base", "java.this.should.fail")); + } + } + + private static class ThrowableSettableMock + implements JdkCompatibilityChecks.ThrowableSettable + { + private Throwable throwable; + + @Override + public void setThrowable(Throwable throwable) + { + this.throwable = requireNonNull(throwable, "throwable is null"); + } + + public Throwable getThrowable() + { + return throwable; + } + } +} diff --git a/lib/trino-record-decoder/pom.xml b/lib/trino-record-decoder/pom.xml index dab148b6c23b..b0e83566fdd1 100644 --- a/lib/trino-record-decoder/pom.xml +++ b/lib/trino-record-decoder/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/plugin/trino-base-jdbc/pom.xml b/plugin/trino-base-jdbc/pom.xml index 8fdfb4eab5b0..0bdb96cb8631 100644 --- a/plugin/trino-base-jdbc/pom.xml +++ b/plugin/trino-base-jdbc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/credential/keystore/KeyStoreUtils.java b/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/credential/keystore/KeyStoreUtils.java index 045a91f6b5c0..23b59059b2e5 100644 --- a/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/credential/keystore/KeyStoreUtils.java +++ b/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/credential/keystore/KeyStoreUtils.java @@ -18,6 +18,7 @@ import java.io.FileInputStream; import java.io.IOException; +import java.io.InputStream; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.KeyStore.PasswordProtection; @@ -31,7 +32,9 @@ public static KeyStore loadKeyStore(String keyStoreType, String keyStorePath, St throws IOException, GeneralSecurityException { KeyStore keyStore = KeyStore.getInstance(keyStoreType); - keyStore.load(new FileInputStream(keyStorePath), keyStorePassword.toCharArray()); + try (InputStream stream = new FileInputStream(keyStorePath)) { + keyStore.load(stream, keyStorePassword.toCharArray()); + } return keyStore; } diff --git a/plugin/trino-bigquery/pom.xml b/plugin/trino-bigquery/pom.xml index 6b83385997d5..07d3845181f3 100644 --- a/plugin/trino-bigquery/pom.xml +++ b/plugin/trino-bigquery/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnectorModule.java b/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnectorModule.java index dd77ccab7bdf..50209b68b5a9 100644 --- a/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnectorModule.java +++ 
b/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnectorModule.java @@ -15,6 +15,7 @@ import com.google.api.gax.rpc.FixedHeaderProvider; import com.google.api.gax.rpc.HeaderProvider; +import com.google.common.collect.ImmutableMultimap; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.inject.Binder; import com.google.inject.Key; @@ -34,11 +35,7 @@ import io.trino.spi.function.table.ConnectorTableFunction; import io.trino.spi.procedure.Procedure; -import java.lang.management.ManagementFactory; -import java.util.Set; import java.util.concurrent.ExecutorService; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static com.google.inject.multibindings.Multibinder.newSetBinder; @@ -47,10 +44,9 @@ import static io.airlift.configuration.ConditionalModule.conditionalModule; import static io.airlift.configuration.ConfigBinder.configBinder; import static io.trino.plugin.base.ClosingBinder.closingBinder; -import static io.trino.plugin.bigquery.BigQueryConfig.ARROW_SERIALIZATION_ENABLED; +import static io.trino.plugin.base.JdkCompatibilityChecks.verifyConnectorAccessOpened; import static java.util.concurrent.Executors.newCachedThreadPool; import static java.util.concurrent.Executors.newFixedThreadPool; -import static java.util.stream.Collectors.toSet; import static org.weakref.jmx.guice.ExportBinder.newExporter; public class BigQueryConnectorModule @@ -181,7 +177,11 @@ public static class ArrowSerializationModule @Override protected void setup(Binder binder) { - verifyPackageAccessAllowed(binder); + // Check reflective access allowed - required by Apache Arrow usage in BigQuery + verifyConnectorAccessOpened( + binder, + "bigquery", + ImmutableMultimap.of("java.base", "java.nio")); configBinder(binder).bindConfig(BigQueryArrowConfig.class); binder.bind(BigQueryArrowBufferAllocator.class).in(Scopes.SINGLETON); @@ 
-189,33 +189,5 @@ protected void setup(Binder binder) newExporter(binder).export(BigQueryArrowBufferAllocator.class).withGeneratedName(); } - - /** - * Apache Arrow requires reflective access to certain Java internals prohibited since Java 17. - * Adds an error to the {@code binder} if required --add-opens is not passed to the JVM. - */ - private static void verifyPackageAccessAllowed(Binder binder) - { - // Match an --add-opens argument that opens a package to unnamed modules. - // The first group is the opened package. - Pattern argPattern = Pattern.compile( - "^--add-opens=(.*)=([A-Za-z0-9_.]+,)*ALL-UNNAMED(,[A-Za-z0-9_.]+)*$"); - // We don't need to check for values in separate arguments because - // they are joined with "=" before we get them. - - Set openedModules = ManagementFactory.getRuntimeMXBean() - .getInputArguments() - .stream() - .map(argPattern::matcher) - .filter(Matcher::matches) - .map(matcher -> matcher.group(1)) - .collect(toSet()); - - if (!openedModules.contains("java.base/java.nio")) { - binder.addError( - "BigQuery connector requires additional JVM arguments to run when '" + ARROW_SERIALIZATION_ENABLED + "' is enabled. 
" + - "Please add '--add-opens=java.base/java.nio=ALL-UNNAMED' to the JVM configuration."); - } - } } } diff --git a/plugin/trino-blackhole/pom.xml b/plugin/trino-blackhole/pom.xml index b67d407c1540..84b186556e90 100644 --- a/plugin/trino-blackhole/pom.xml +++ b/plugin/trino-blackhole/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-cassandra/pom.xml b/plugin/trino-cassandra/pom.xml index eed45ed96264..e0f5338b24a5 100644 --- a/plugin/trino-cassandra/pom.xml +++ b/plugin/trino-cassandra/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-clickhouse/pom.xml b/plugin/trino-clickhouse/pom.xml index 57a091476922..d578ff7230f9 100644 --- a/plugin/trino-clickhouse/pom.xml +++ b/plugin/trino-clickhouse/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-delta-lake/pom.xml b/plugin/trino-delta-lake/pom.xml index 660e78eb5de5..5d2fe0d7cd8a 100644 --- a/plugin/trino-delta-lake/pom.xml +++ b/plugin/trino-delta-lake/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaHiveTypeTranslator.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaHiveTypeTranslator.java index 8b31d8a6bf1e..82512f5c9119 100644 --- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaHiveTypeTranslator.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaHiveTypeTranslator.java @@ -29,6 +29,7 @@ import io.trino.spi.type.TypeSignatureParameter; import io.trino.spi.type.VarcharType; +import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Verify.verify; import static com.google.common.collect.ImmutableList.toImmutableList; import static io.trino.metastore.HiveType.HIVE_BINARY; @@ -42,6 +43,7 
@@ import static io.trino.metastore.HiveType.HIVE_SHORT; import static io.trino.metastore.HiveType.HIVE_STRING; import static io.trino.metastore.HiveType.HIVE_TIMESTAMP; +import static io.trino.metastore.HiveType.HIVE_VARIANT; import static io.trino.metastore.type.CharTypeInfo.MAX_CHAR_LENGTH; import static io.trino.metastore.type.TypeInfoFactory.getCharTypeInfo; import static io.trino.metastore.type.TypeInfoFactory.getListTypeInfo; @@ -57,6 +59,7 @@ import static io.trino.spi.type.IntegerType.INTEGER; import static io.trino.spi.type.RealType.REAL; import static io.trino.spi.type.SmallintType.SMALLINT; +import static io.trino.spi.type.StandardTypes.JSON; import static io.trino.spi.type.TinyintType.TINYINT; import static io.trino.spi.type.VarbinaryType.VARBINARY; import static java.lang.String.format; @@ -129,6 +132,10 @@ public static TypeInfo translate(Type type) if (type instanceof DecimalType decimalType) { return new DecimalTypeInfo(decimalType.getPrecision(), decimalType.getScale()); } + if (type.getTypeSignature().getBase().equals(JSON)) { + checkArgument(type.getTypeSignature().getParameters().isEmpty(), "JSON type should not have parameters"); + return HIVE_VARIANT.getTypeInfo(); + } if (type instanceof ArrayType arrayType) { TypeInfo elementType = translate(arrayType.getElementType()); return getListTypeInfo(elementType); diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeConfig.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeConfig.java index 1d553252dfe8..fe21dded379f 100644 --- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeConfig.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeConfig.java @@ -23,6 +23,7 @@ import io.airlift.units.Duration; import io.airlift.units.MaxDuration; import io.airlift.units.MinDuration; +import io.airlift.units.ThreadCount; import io.trino.plugin.hive.HiveCompressionCodec; import 
jakarta.validation.constraints.DecimalMax; import jakarta.validation.constraints.DecimalMin; @@ -425,9 +426,9 @@ public int getStoreTableMetadataThreads() @Config("delta.metastore.store-table-metadata-threads") @ConfigDescription("Number of threads used for storing table metadata in metastore") - public DeltaLakeConfig setStoreTableMetadataThreads(int storeTableMetadataThreads) + public DeltaLakeConfig setStoreTableMetadataThreads(String storeTableMetadataThreads) { - this.storeTableMetadataThreads = storeTableMetadataThreads; + this.storeTableMetadataThreads = ThreadCount.valueOf(storeTableMetadataThreads).getThreadCount(); return this; } diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java index ab729bea2793..2a5c0bb097d4 100644 --- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java @@ -3282,7 +3282,7 @@ private void setRollback(Runnable action) checkState(rollbackAction.compareAndSet(null, action), "rollback action is already set"); } - private static String toUriFormat(String path) + public static String toUriFormat(String path) { verify(!path.startsWith("/") && !path.contains(":/"), "unexpected path: %s", path); try { diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/delete/DeletionVectors.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/delete/DeletionVectors.java index fb7e7a82c55f..2ade9c976859 100644 --- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/delete/DeletionVectors.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/delete/DeletionVectors.java @@ -62,13 +62,21 @@ private DeletionVectors() {} public static RoaringBitmapArray readDeletionVectors(TrinoFileSystem fileSystem, Location location, 
DeletionVectorEntry deletionVector) throws IOException { - if (deletionVector.storageType().equals(UUID_MARKER)) { - TrinoInputFile inputFile = fileSystem.newInputFile(location.appendPath(toFileName(deletionVector.pathOrInlineDv()))); - ByteBuffer buffer = readDeletionVector(inputFile, deletionVector.offset().orElseThrow(), deletionVector.sizeInBytes()); - return deserializeDeletionVectors(buffer); - } - if (deletionVector.storageType().equals(INLINE_MARKER) || deletionVector.storageType().equals(PATH_MARKER)) { - throw new TrinoException(NOT_SUPPORTED, "Unsupported storage type for deletion vector: " + deletionVector.storageType()); + switch (deletionVector.storageType()) { + case UUID_MARKER -> { + TrinoInputFile inputFile = fileSystem.newInputFile(location.appendPath(toFileName(deletionVector.pathOrInlineDv()))); + ByteBuffer buffer = readDeletionVector(inputFile, deletionVector.offset().orElseThrow(), deletionVector.sizeInBytes()); + return deserializeDeletionVectors(buffer); + } + case PATH_MARKER -> { + TrinoInputFile inputFile = fileSystem.newInputFile(Location.of(deletionVector.pathOrInlineDv())); + if (!inputFile.exists()) { + throw new IllegalArgumentException("Unable to find 'p' type deletion vector by path: " + deletionVector.pathOrInlineDv()); + } + ByteBuffer buffer = readDeletionVector(inputFile, deletionVector.offset().orElseThrow(), deletionVector.sizeInBytes()); + return deserializeDeletionVectors(buffer); + } + case INLINE_MARKER -> throw new TrinoException(NOT_SUPPORTED, "Unsupported storage type for deletion vector: " + deletionVector.storageType()); } throw new IllegalArgumentException("Unexpected storage type: " + deletionVector.storageType()); } diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/procedure/VacuumProcedure.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/procedure/VacuumProcedure.java index e84a5d700c29..e6907a872a84 100644 --- 
a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/procedure/VacuumProcedure.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/procedure/VacuumProcedure.java @@ -66,6 +66,7 @@ import static io.trino.plugin.deltalake.DeltaLakeMetadata.MAX_WRITER_VERSION; import static io.trino.plugin.deltalake.DeltaLakeMetadata.checkUnsupportedUniversalFormat; import static io.trino.plugin.deltalake.DeltaLakeMetadata.checkValidTableHandle; +import static io.trino.plugin.deltalake.DeltaLakeMetadata.toUriFormat; import static io.trino.plugin.deltalake.DeltaLakeSessionProperties.getVacuumMinRetention; import static io.trino.plugin.deltalake.transactionlog.DeltaLakeTableFeatures.DELETION_VECTORS_FEATURE_NAME; import static io.trino.plugin.deltalake.transactionlog.DeltaLakeTableFeatures.unsupportedWriterFeatures; @@ -269,7 +270,9 @@ private void doVacuum( "Unexpected path [%s] returned when listing files under [%s]", location, tableLocation); - String relativePath = location.substring(commonPathPrefix.length()); + + // Paths are RFC 2396 URI encoded https://github.com/delta-io/delta/blob/master/PROTOCOL.md#add-file-and-remove-file + String relativePath = toUriFormat(location.substring(commonPathPrefix.length())); if (relativePath.isEmpty()) { // A file returned for "tableLocation/", might be possible on S3. 
continue; diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/DeltaLakeSchemaSupport.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/DeltaLakeSchemaSupport.java index d9fc72df070a..39b6e37c2e2a 100644 --- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/DeltaLakeSchemaSupport.java +++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/DeltaLakeSchemaSupport.java @@ -86,6 +86,7 @@ import static io.trino.spi.type.IntegerType.INTEGER; import static io.trino.spi.type.RealType.REAL; import static io.trino.spi.type.SmallintType.SMALLINT; +import static io.trino.spi.type.StandardTypes.JSON; import static io.trino.spi.type.TimestampType.TIMESTAMP_MICROS; import static io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_TZ_MILLIS; import static io.trino.spi.type.TinyintType.TINYINT; @@ -765,7 +766,7 @@ private static Type buildType(TypeManager typeManager, JsonNode typeNode, boolea // For more info, see https://delta-users.slack.com/archives/GKTUWT03T/p1585760533005400 // and https://cwiki.apache.org/confluence/display/Hive/Different+TIMESTAMP+types case "timestamp" -> TIMESTAMP_TZ_MILLIS; - case "variant" -> throw new UnsupportedTypeException("variant"); + case "variant" -> typeManager.getType(new TypeSignature(JSON)); default -> throw new TypeNotFoundException(new TypeSignature(primitiveType)); }; } diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java index 2bfd28edd699..bd65ceedce9f 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java @@ -1798,6 +1798,52 @@ public void testVacuumWithTrailingSlash() } } + 
@Test + public void testVacuumWithWhiteSpace() + throws Exception + { + String catalog = getSession().getCatalog().orElseThrow(); + String tableName = "test_vacuum_white_space_" + randomNameSuffix(); + String tableLocation = getLocationForTable(bucketName, tableName) + "/"; + Session sessionWithShortRetentionUnlocked = Session.builder(getSession()) + .setCatalogSessionProperty(catalog, "vacuum_min_retention", "0s") + .build(); + + assertUpdate(format("CREATE TABLE %s (val int, col_white_space timestamp(6)) WITH (location = '%s', partitioned_by = ARRAY['col_white_space'])", tableName, tableLocation)); + + try { + assertUpdate("INSERT INTO " + tableName + " VALUES (1, TIMESTAMP '2024-12-13 11:00:00.000000'), (2, TIMESTAMP '2024-12-13 12:00:00.000000')", 2); + + Set initialFiles = getActiveFiles(tableName); + assertThat(initialFiles).hasSize(2); + + computeActual("UPDATE " + tableName + " SET val = val + 100"); + Stopwatch timeSinceUpdate = Stopwatch.createStarted(); + Set updatedFiles = getActiveFiles(tableName); + assertThat(updatedFiles).hasSize(2).doesNotContainAnyElementsOf(initialFiles); + assertThat(getAllDataFilesFromTableDirectory(tableName)).isEqualTo(union(initialFiles, updatedFiles)); + + // vacuum with high retention period, nothing should change + assertUpdate(sessionWithShortRetentionUnlocked, "CALL system.vacuum(CURRENT_SCHEMA, '" + tableName + "', '10m')"); + assertQuery("SELECT * FROM " + tableName, "VALUES (101, TIMESTAMP '2024-12-13 11:00:00.000000'), (102, TIMESTAMP '2024-12-13 12:00:00.000000')"); + assertThat(getActiveFiles(tableName)).isEqualTo(updatedFiles); + assertThat(getAllDataFilesFromTableDirectory(tableName)).isEqualTo(union(initialFiles, updatedFiles)); + + // vacuum with low retention period + MILLISECONDS.sleep(1_000 - timeSinceUpdate.elapsed(MILLISECONDS) + 1); + assertUpdate(sessionWithShortRetentionUnlocked, "CALL system.vacuum(CURRENT_SCHEMA, '" + tableName + "', '1s')"); + // table data shouldn't change + assertQuery("SELECT * 
FROM " + tableName, "VALUES (101, TIMESTAMP '2024-12-13 11:00:00.000000'), (102, TIMESTAMP '2024-12-13 12:00:00.000000')"); + // active files shouldn't change + assertThat(getActiveFiles(tableName)).isEqualTo(updatedFiles); + // old files should be cleaned up + assertThat(getAllDataFilesFromTableDirectory(tableName)).isEqualTo(updatedFiles); + } + finally { + assertUpdate("DROP TABLE " + tableName); + } + } + @Test public void testVacuumParameterValidation() { diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeBasic.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeBasic.java index 0f0b29f84340..a7eb09f73bbd 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeBasic.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeBasic.java @@ -125,6 +125,7 @@ public class TestDeltaLakeBasic new ResourceTable("no_column_stats", "databricks73/no_column_stats"), new ResourceTable("liquid_clustering", "deltalake/liquid_clustering"), new ResourceTable("region_91_lts", "databricks91/region"), + new ResourceTable("region_104_lts", "databricks104/region"), new ResourceTable("timestamp_ntz", "databricks131/timestamp_ntz"), new ResourceTable("timestamp_ntz_partition", "databricks131/timestamp_ntz_partition"), new ResourceTable("uniform_hudi", "deltalake/uniform_hudi"), @@ -133,6 +134,7 @@ public class TestDeltaLakeBasic new ResourceTable("unsupported_writer_feature", "deltalake/unsupported_writer_feature"), new ResourceTable("unsupported_writer_version", "deltalake/unsupported_writer_version"), new ResourceTable("variant", "databricks153/variant"), + new ResourceTable("variant_types", "databricks153/variant_types"), new ResourceTable("type_widening", "databricks153/type_widening"), new ResourceTable("type_widening_partition", "databricks153/type_widening_partition"), new ResourceTable("type_widening_nested", 
"databricks153/type_widening_nested")); @@ -217,6 +219,14 @@ void testDatabricks91() .matches("SELECT * FROM tpch.tiny.region"); } + @Test + void testDatabricks104() + { + assertThat(query("SELECT * FROM region_104_lts")) + .skippingTypesCheck() // name and comment columns are unbounded varchar in Delta Lake and bounded varchar in TPCH + .matches("SELECT * FROM tpch.tiny.region"); + } + @Test public void testNoColumnStats() { @@ -1504,14 +1514,65 @@ public void testUniFormIcebergV2() @Test public void testVariant() { - // TODO (https://github.com/trinodb/trino/issues/22309) Add support for variant type assertThat(query("DESCRIBE variant")).result().projected("Column", "Type") .skippingTypesCheck() - .matches("VALUES ('col_int', 'integer'), ('col_string', 'varchar')"); + .matches("VALUES " + + "('col_int', 'integer')," + + "('simple_variant', 'json')," + + "('array_variant', 'array(json)')," + + "('map_variant', 'map(varchar, json)')," + + "('struct_variant', 'row(x json)')," + + "('col_string', 'varchar')"); + + assertThat(query("SELECT col_int, simple_variant, array_variant[1], map_variant['key1'], struct_variant.x, col_string FROM variant")) + .skippingTypesCheck() + .matches("VALUES " + + "(1, JSON '{\"col\":1}', JSON '{\"array\":2}', JSON '{\"map\":3}', JSON '{\"struct\":4}', 'test data')," + + "(2, JSON '{\"col\":null}', JSON '{\"array\":null}', JSON '{\"map\":null}', JSON '{\"struct\":null}', 'test null data')," + + "(3, NULL, NULL, NULL, NULL, 'test null')," + + "(4, JSON '1', JSON '2', JSON '3', JSON '4', 'test without fields')"); - assertQuery("SELECT * FROM variant", "VALUES (1, 'test data')"); + assertQueryFails("INSERT INTO variant VALUES (2, null, null, null, null, 'new data')", "Unsupported writer features: .*"); + } - assertQueryFails("INSERT INTO variant VALUES (2, 'new data')", "Unsupported writer features: .*"); + /** + * @see databricks153.variant_types + */ + @Test + public void testVariantTypes() + { + assertThat(query(""" + SELECT + 
col_boolean, + col_long, + col_float, + col_double, + col_decimal, + col_string, + col_binary, + col_date, + col_timestamp, + col_timestampntz, + col_array, + col_map, + col_struct + FROM variant_types""")) + .skippingTypesCheck() + .matches(""" + VALUES + ('true', + '1', + '0.2', + '0.3', + '0.4', + '"test data"', + '"ZWg/"', + '"2021-02-03"', + '"2001-08-21 19:02:03.321-06:00"', + '"2021-01-02 12:34:56.123456"', + '[1]', + '{"key1":1,"key2":2}', + '{"x":1}')"""); } @Test diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConfig.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConfig.java index fb4555c6fb1d..f197ac7de78a 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConfig.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeConfig.java @@ -66,7 +66,7 @@ public void testDefaults() .setParquetTimeZone(TimeZone.getDefault().getID()) .setPerTransactionMetastoreCacheMaximumSize(1000) .setStoreTableMetadataEnabled(false) - .setStoreTableMetadataThreads(5) + .setStoreTableMetadataThreads("5") .setStoreTableMetadataInterval(new Duration(1, SECONDS)) .setTargetMaxFileSize(DataSize.of(1, GIGABYTE)) .setIdleWriterMinFileSize(DataSize.of(16, MEGABYTE)) @@ -149,7 +149,7 @@ public void testExplicitPropertyMappings() .setParquetTimeZone(nonDefaultTimeZone().getID()) .setPerTransactionMetastoreCacheMaximumSize(500) .setStoreTableMetadataEnabled(true) - .setStoreTableMetadataThreads(1) + .setStoreTableMetadataThreads("1") .setStoreTableMetadataInterval(new Duration(30, MINUTES)) .setTargetMaxFileSize(DataSize.of(2, GIGABYTE)) .setIdleWriterMinFileSize(DataSize.of(1, MEGABYTE)) diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/delete/TestDeletionVectors.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/delete/TestDeletionVectors.java index c118ac743a08..b91004fefad9 100644 --- 
a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/delete/TestDeletionVectors.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/delete/TestDeletionVectors.java @@ -47,15 +47,6 @@ public void testUuidStorageType() assertThat(bitmaps.contains(2)).isFalse(); } - @Test - public void testUnsupportedPathStorageType() - { - TrinoFileSystem fileSystem = HDFS_FILE_SYSTEM_FACTORY.create(SESSION); - DeletionVectorEntry deletionVector = new DeletionVectorEntry("p", "s3://bucket/table/deletion_vector.bin", OptionalInt.empty(), 40, 1); - assertThatThrownBy(() -> readDeletionVectors(fileSystem, Location.of("s3://bucket/table"), deletionVector)) - .hasMessageContaining("Unsupported storage type for deletion vector: p"); - } - @Test public void testUnsupportedInlineStorageType() { diff --git a/plugin/trino-delta-lake/src/test/resources/databricks104/region/README.md b/plugin/trino-delta-lake/src/test/resources/databricks104/region/README.md new file mode 100644 index 000000000000..094c62593cb0 --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks104/region/README.md @@ -0,0 +1,13 @@ +Data generated using Databricks 10.4: + +```sql +CREATE TABLE default.region (regionkey bigint, name string, comment string) +USING DELTA LOCATION 's3://trino-ci-test/default/region'; + +INSERT INTO default.region VALUES +(0, 'AFRICA', 'lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to '), +(1, 'AMERICA', 'hs use ironic, even requests. s'), +(2, 'ASIA', 'ges. thinly even pinto beans ca'), +(3, 'EUROPE', 'ly final courts cajole furiously final excuse'), +(4, 'MIDDLE EAST', 'uickly special accounts cajole carefully blithely close requests. 
carefully final asymptotes haggle furiousl'); +``` diff --git a/plugin/trino-delta-lake/src/test/resources/databricks104/region/_delta_log/00000000000000000000.json b/plugin/trino-delta-lake/src/test/resources/databricks104/region/_delta_log/00000000000000000000.json new file mode 100644 index 000000000000..4c880ca0b7eb --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks104/region/_delta_log/00000000000000000000.json @@ -0,0 +1,3 @@ +{"commitInfo":{"timestamp":1738882384750,"userId":"7853186923043731","userName":"yuya.ebihara@starburstdata.com","operation":"CREATE TABLE","operationParameters":{"isManaged":"false","description":null,"partitionBy":"[]","properties":"{}"},"notebook":{"notebookId":"1841155838656679"},"clusterId":"0705-100936-r65whg38","isolationLevel":"WriteSerializable","isBlindAppend":true,"operationMetrics":{},"engineInfo":"Databricks-Runtime/10.4.x-scala2.12","txnId":"38b7030f-ef5e-4fa7-a511-5687aa66081b"}} +{"protocol":{"minReaderVersion":1,"minWriterVersion":2}} +{"metaData":{"id":"fe62a9c4-7768-4a9a-a498-9706284fc221","format":{"provider":"parquet","options":{}},"schemaString":"{\"type\":\"struct\",\"fields\":[{\"name\":\"regionkey\",\"type\":\"long\",\"nullable\":true,\"metadata\":{}},{\"name\":\"name\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"comment\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}}]}","partitionColumns":[],"configuration":{},"createdTime":1738882384385}} diff --git a/plugin/trino-delta-lake/src/test/resources/databricks104/region/_delta_log/00000000000000000001.json b/plugin/trino-delta-lake/src/test/resources/databricks104/region/_delta_log/00000000000000000001.json new file mode 100644 index 000000000000..1a0f607bc2a1 --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks104/region/_delta_log/00000000000000000001.json @@ -0,0 +1,3 @@ 
+{"commitInfo":{"timestamp":1738882405690,"userId":"7853186923043731","userName":"yuya.ebihara@starburstdata.com","operation":"WRITE","operationParameters":{"mode":"Append","partitionBy":"[]"},"notebook":{"notebookId":"1841155838656679"},"clusterId":"0705-100936-r65whg38","readVersion":0,"isolationLevel":"WriteSerializable","isBlindAppend":true,"operationMetrics":{"numFiles":"2","numOutputRows":"5","numOutputBytes":"3497"},"engineInfo":"Databricks-Runtime/10.4.x-scala2.12","txnId":"c6563eac-ecba-40e4-b2c8-0820ba2b9594"}} +{"add":{"path":"part-00000-c5f42889-aac9-4899-8ca6-f9db1e07d400-c000.snappy.parquet","partitionValues":{},"size":1725,"modificationTime":1738882406000,"dataChange":true,"stats":"{\"numRecords\":2,\"minValues\":{\"regionkey\":0,\"name\":\"AFRICA\",\"comment\":\"hs use ironic, even requests. s\"},\"maxValues\":{\"regionkey\":1,\"name\":\"AMERICA\",\"comment\":\"lar deposits. blithely final pac�\"},\"nullCount\":{\"regionkey\":0,\"name\":0,\"comment\":0}}","tags":{"INSERTION_TIME":"1738882406000000","OPTIMIZE_TARGET_SIZE":"268435456"}}} +{"add":{"path":"part-00001-76423f81-c703-421f-bb1a-c227a8c914b3-c000.snappy.parquet","partitionValues":{},"size":1772,"modificationTime":1738882406000,"dataChange":true,"stats":"{\"numRecords\":3,\"minValues\":{\"regionkey\":2,\"name\":\"ASIA\",\"comment\":\"ges. 
thinly even pinto beans ca\"},\"maxValues\":{\"regionkey\":4,\"name\":\"MIDDLE EAST\",\"comment\":\"uickly special accounts cajole c�\"},\"nullCount\":{\"regionkey\":0,\"name\":0,\"comment\":0}}","tags":{"INSERTION_TIME":"1738882406000001","OPTIMIZE_TARGET_SIZE":"268435456"}}} diff --git a/plugin/trino-delta-lake/src/test/resources/databricks104/region/part-00000-c5f42889-aac9-4899-8ca6-f9db1e07d400-c000.snappy.parquet b/plugin/trino-delta-lake/src/test/resources/databricks104/region/part-00000-c5f42889-aac9-4899-8ca6-f9db1e07d400-c000.snappy.parquet new file mode 100644 index 000000000000..c26f976710af Binary files /dev/null and b/plugin/trino-delta-lake/src/test/resources/databricks104/region/part-00000-c5f42889-aac9-4899-8ca6-f9db1e07d400-c000.snappy.parquet differ diff --git a/plugin/trino-delta-lake/src/test/resources/databricks104/region/part-00001-76423f81-c703-421f-bb1a-c227a8c914b3-c000.snappy.parquet b/plugin/trino-delta-lake/src/test/resources/databricks104/region/part-00001-76423f81-c703-421f-bb1a-c227a8c914b3-c000.snappy.parquet new file mode 100644 index 000000000000..d0699765f352 Binary files /dev/null and b/plugin/trino-delta-lake/src/test/resources/databricks104/region/part-00001-76423f81-c703-421f-bb1a-c227a8c914b3-c000.snappy.parquet differ diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/README.md b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/README.md index 65935f0c8ac3..62015da9be4a 100644 --- a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/README.md +++ b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/README.md @@ -8,4 +8,13 @@ LOCATION ?; INSERT INTO default.test_variant VALUES (1, parse_json('{"col":1}'), array(parse_json('{"array":2}')), map('key1', parse_json('{"map":3}')), named_struct('x', parse_json('{"struct":4}')), 'test data'); + +INSERT INTO default.test_variant +VALUES (2, parse_json('{"col":null}'), array(parse_json('{"array":null}')), 
map('key1', parse_json('{"map":null}')), named_struct('x', parse_json('{"struct":null}')), 'test null data'); + +INSERT INTO default.test_variant +VALUES (3, parse_json(NULL), array(NULL), map('key1', NULL), named_struct('x', NULL), 'test null'); + +INSERT INTO default.test_variant +VALUES (4, parse_json('1'), array(parse_json('2')), map('key1', parse_json('3')), named_struct('x', parse_json('4')), 'test without fields'); ``` diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000002.json b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000002.json new file mode 100644 index 000000000000..2bd3f1f5801a --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000002.json @@ -0,0 +1,2 @@ +{"commitInfo":{"timestamp":1718586112107,"userId":"7853186923043731","userName":"yuya.ebihara@starburstdata.com","operation":"WRITE","operationParameters":{"mode":"Append","statsOnLoad":false,"partitionBy":"[]"},"notebook":{"notebookId":"1841155838656679"},"clusterId":"0607-024930-gxd23c26","readVersion":0,"isolationLevel":"WriteSerializable","isBlindAppend":true,"operationMetrics":{"numFiles":"1","numOutputRows":"1","numOutputBytes":"3377"},"tags":{"noRowsCopied":"true","restoresDeletedRows":"false"},"engineInfo":"Databricks-Runtime/15.3.x-scala2.12","txnId":"58f6af45-8063-4fae-9018-95bfe6cbd561"}} +{"add":{"path":"part-00000-090dbdcd-fc4e-47c9-8549-59cf79d9c7e6-c000.snappy.parquet","partitionValues":{},"size":3377,"modificationTime":1718586111000,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"col_int\":2,\"col_string\":\"test null data\"},\"maxValues\":{\"col_int\":2,\"col_string\":\"test null 
data\"},\"nullCount\":{\"col_int\":0,\"simple_variant\":0,\"array_variant\":0,\"map_variant\":0,\"struct_variant\":{\"x\":0},\"col_string\":0},\"tightBounds\":true}","tags":{"INSERTION_TIME":"1718586111000000","MIN_INSERTION_TIME":"1718586111000000","MAX_INSERTION_TIME":"1718586111000000","OPTIMIZE_TARGET_SIZE":"268435456"}}} diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000003.json b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000003.json new file mode 100644 index 000000000000..66839aa9fb0f --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000003.json @@ -0,0 +1,2 @@ +{"commitInfo":{"timestamp":1718586451540,"userId":"7853186923043731","userName":"yuya.ebihara@starburstdata.com","operation":"WRITE","operationParameters":{"mode":"Append","statsOnLoad":false,"partitionBy":"[]"},"notebook":{"notebookId":"1841155838656679"},"clusterId":"0607-024930-gxd23c26","readVersion":1,"isolationLevel":"WriteSerializable","isBlindAppend":true,"operationMetrics":{"numFiles":"1","numOutputRows":"1","numOutputBytes":"2849"},"tags":{"noRowsCopied":"true","restoresDeletedRows":"false"},"engineInfo":"Databricks-Runtime/15.3.x-scala2.12","txnId":"43f39922-3699-49fe-9b32-a1556bbef0a0"}} +{"add":{"path":"part-00000-4d01f20d-41d2-49fb-b98e-251a4c0984aa-c000.snappy.parquet","partitionValues":{},"size":2849,"modificationTime":1718586452000,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"col_int\":3,\"col_string\":\"test null\"},\"maxValues\":{\"col_int\":3,\"col_string\":\"test null\"},\"nullCount\":{\"col_int\":0,\"simple_variant\":1,\"array_variant\":0,\"map_variant\":0,\"struct_variant\":{\"x\":1},\"col_string\":0},\"tightBounds\":true}","tags":{"INSERTION_TIME":"1718586452000000","MIN_INSERTION_TIME":"1718586452000000","MAX_INSERTION_TIME":"1718586452000000","OPTIMIZE_TARGET_SIZE":"268435456"}}} diff --git 
a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000004.json b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000004.json new file mode 100644 index 000000000000..7d8d016f9b08 --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/_delta_log/00000000000000000004.json @@ -0,0 +1,2 @@ +{"commitInfo":{"timestamp":1718587514563,"userId":"7853186923043731","userName":"yuya.ebihara@starburstdata.com","operation":"WRITE","operationParameters":{"mode":"Append","statsOnLoad":false,"partitionBy":"[]"},"notebook":{"notebookId":"1841155838656679"},"clusterId":"0607-024930-gxd23c26","readVersion":2,"isolationLevel":"WriteSerializable","isBlindAppend":true,"operationMetrics":{"numFiles":"1","numOutputRows":"1","numOutputBytes":"3156"},"tags":{"noRowsCopied":"true","restoresDeletedRows":"false"},"engineInfo":"Databricks-Runtime/15.3.x-scala2.12","txnId":"75d31328-c821-4273-af1b-4d7f5d15cdd8"}} +{"add":{"path":"part-00000-81478783-cb9b-4719-975e-027177ae86e3-c000.snappy.parquet","partitionValues":{},"size":3156,"modificationTime":1718587515000,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"col_int\":4,\"col_string\":\"test without fields\"},\"maxValues\":{\"col_int\":4,\"col_string\":\"test without fields\"},\"nullCount\":{\"col_int\":0,\"simple_variant\":0,\"array_variant\":0,\"map_variant\":0,\"struct_variant\":{\"x\":0},\"col_string\":0},\"tightBounds\":true}","tags":{"INSERTION_TIME":"1718587515000000","MIN_INSERTION_TIME":"1718587515000000","MAX_INSERTION_TIME":"1718587515000000","OPTIMIZE_TARGET_SIZE":"268435456"}}} diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-090dbdcd-fc4e-47c9-8549-59cf79d9c7e6-c000.snappy.parquet b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-090dbdcd-fc4e-47c9-8549-59cf79d9c7e6-c000.snappy.parquet new file mode 100644 index 000000000000..1917a2760127 
Binary files /dev/null and b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-090dbdcd-fc4e-47c9-8549-59cf79d9c7e6-c000.snappy.parquet differ diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-4d01f20d-41d2-49fb-b98e-251a4c0984aa-c000.snappy.parquet b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-4d01f20d-41d2-49fb-b98e-251a4c0984aa-c000.snappy.parquet new file mode 100644 index 000000000000..6c58889f533e Binary files /dev/null and b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-4d01f20d-41d2-49fb-b98e-251a4c0984aa-c000.snappy.parquet differ diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-81478783-cb9b-4719-975e-027177ae86e3-c000.snappy.parquet b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-81478783-cb9b-4719-975e-027177ae86e3-c000.snappy.parquet new file mode 100644 index 000000000000..cd4a81cc6c7a Binary files /dev/null and b/plugin/trino-delta-lake/src/test/resources/databricks153/variant/part-00000-81478783-cb9b-4719-975e-027177ae86e3-c000.snappy.parquet differ diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/README.md b/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/README.md new file mode 100644 index 000000000000..c18728cf598a --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/README.md @@ -0,0 +1,21 @@ +Data generated using Databricks 15.3: + +```sql +CREATE TABLE default.test_variant_types +USING delta +LOCATION ? 
+AS SELECT + CAST(true AS variant) AS col_boolean, + CAST(1 AS variant) AS col_long, + CAST(CAST('0.2' AS float) AS variant) AS col_float, + CAST(CAST('0.3' AS double) AS variant) AS col_double, + CAST(CAST('0.4' AS decimal) AS variant) AS col_decimal, + CAST('test data' AS variant) AS col_string, + CAST(X'65683F' AS variant) AS col_binary, + CAST(date '2021-02-03' AS variant) AS col_date, + CAST(timestamp '2001-08-22 01:02:03.321 UTC' AS variant) AS col_timestamp, + CAST(timestamp_ntz '2021-01-02 12:34:56.123456' AS variant) AS col_timestampntz, + CAST(array(1) AS variant) AS col_array, + CAST(map('key1', 1, 'key2', 2) AS variant) AS col_map, + CAST(named_struct('x', 1) AS variant) AS col_struct; +``` diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/_delta_log/00000000000000000000.json b/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/_delta_log/00000000000000000000.json new file mode 100644 index 000000000000..31a0a76acbf8 --- /dev/null +++ b/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/_delta_log/00000000000000000000.json @@ -0,0 +1,4 @@ +{"commitInfo":{"timestamp":1718597262586,"userId":"7853186923043731","userName":"yuya.ebihara@starburstdata.com","operation":"CREATE TABLE AS SELECT","operationParameters":{"partitionBy":"[]","clusterBy":"[]","description":null,"isManaged":"false","properties":"{\"delta.enableDeletionVectors\":\"true\"}","statsOnLoad":false},"notebook":{"notebookId":"1841155838656679"},"clusterId":"0607-024930-gxd23c26","isolationLevel":"WriteSerializable","isBlindAppend":true,"operationMetrics":{"numFiles":"1","numOutputRows":"1","numOutputBytes":"6331"},"tags":{"noRowsCopied":"true","restoresDeletedRows":"false"},"engineInfo":"Databricks-Runtime/15.3.x-scala2.12","txnId":"a5f86d81-443e-4374-a377-699ca48ae6da"}} 
+{"metaData":{"id":"ea47d678-864d-4652-8957-3099f5cf3153","format":{"provider":"parquet","options":{}},"schemaString":"{\"type\":\"struct\",\"fields\":[{\"name\":\"col_boolean\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_long\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_float\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_double\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_decimal\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_string\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_binary\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_date\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_timestamp\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_timestampntz\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_array\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_map\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}},{\"name\":\"col_struct\",\"type\":\"variant\",\"nullable\":true,\"metadata\":{}}]}","partitionColumns":[],"configuration":{"delta.enableDeletionVectors":"true"},"createdTime":1718597261906}} +{"protocol":{"minReaderVersion":3,"minWriterVersion":7,"readerFeatures":["deletionVectors","variantType-preview"],"writerFeatures":["deletionVectors","variantType-preview"]}} 
+{"add":{"path":"part-00000-376865ac-f6c8-4f0f-9d9b-bb8a982e6c0b-c000.snappy.parquet","partitionValues":{},"size":6331,"modificationTime":1718597263000,"dataChange":true,"stats":"{\"numRecords\":1,\"nullCount\":{\"col_boolean\":0,\"col_long\":0,\"col_float\":0,\"col_double\":0,\"col_decimal\":0,\"col_string\":0,\"col_binary\":0,\"col_date\":0,\"col_timestamp\":0,\"col_timestampntz\":0,\"col_array\":0,\"col_map\":0,\"col_struct\":0},\"tightBounds\":true}","tags":{"INSERTION_TIME":"1718597263000000","MIN_INSERTION_TIME":"1718597263000000","MAX_INSERTION_TIME":"1718597263000000","OPTIMIZE_TARGET_SIZE":"268435456"}}} diff --git a/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/part-00000-376865ac-f6c8-4f0f-9d9b-bb8a982e6c0b-c000.snappy.parquet b/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/part-00000-376865ac-f6c8-4f0f-9d9b-bb8a982e6c0b-c000.snappy.parquet new file mode 100644 index 000000000000..c0713ee5ce1f Binary files /dev/null and b/plugin/trino-delta-lake/src/test/resources/databricks153/variant_types/part-00000-376865ac-f6c8-4f0f-9d9b-bb8a982e6c0b-c000.snappy.parquet differ diff --git a/plugin/trino-druid/pom.xml b/plugin/trino-druid/pom.xml index 1c64362ad0a1..fbaea6df7227 100644 --- a/plugin/trino-druid/pom.xml +++ b/plugin/trino-druid/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-duckdb/pom.xml b/plugin/trino-duckdb/pom.xml index 3ba85d670923..b0c4ba9f904f 100644 --- a/plugin/trino-duckdb/pom.xml +++ b/plugin/trino-duckdb/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-elasticsearch/pom.xml b/plugin/trino-elasticsearch/pom.xml index e8b63d571ac6..4ac3d1c36f4d 100644 --- a/plugin/trino-elasticsearch/pom.xml +++ b/plugin/trino-elasticsearch/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-example-http/pom.xml 
b/plugin/trino-example-http/pom.xml index 8700da9e6d87..b0b00866b340 100644 --- a/plugin/trino-example-http/pom.xml +++ b/plugin/trino-example-http/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-example-jdbc/pom.xml b/plugin/trino-example-jdbc/pom.xml index 7281159c7e40..72f5d1f7a381 100644 --- a/plugin/trino-example-jdbc/pom.xml +++ b/plugin/trino-example-jdbc/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exasol/pom.xml b/plugin/trino-exasol/pom.xml index b1bb0f1eb9a4..f80b2b679411 100644 --- a/plugin/trino-exasol/pom.xml +++ b/plugin/trino-exasol/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exchange-filesystem/pom.xml b/plugin/trino-exchange-filesystem/pom.xml index 73954851e143..10710a5ba68f 100644 --- a/plugin/trino-exchange-filesystem/pom.xml +++ b/plugin/trino-exchange-filesystem/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exchange-filesystem/src/main/java/io/trino/plugin/exchange/filesystem/s3/S3FileSystemExchangeStorage.java b/plugin/trino-exchange-filesystem/src/main/java/io/trino/plugin/exchange/filesystem/s3/S3FileSystemExchangeStorage.java index a21f6fe570b8..514e5e13ee54 100644 --- a/plugin/trino-exchange-filesystem/src/main/java/io/trino/plugin/exchange/filesystem/s3/S3FileSystemExchangeStorage.java +++ b/plugin/trino-exchange-filesystem/src/main/java/io/trino/plugin/exchange/filesystem/s3/S3FileSystemExchangeStorage.java @@ -53,6 +53,8 @@ import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.auth.signer.AwsS3V4Signer; +import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; 
import software.amazon.awssdk.awscore.endpoint.AwsClientEndpointProvider; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; @@ -376,6 +378,7 @@ private ListenableFuture> deleteObjects(String bucke DeleteObjectsRequest request = DeleteObjectsRequest.builder() .bucket(bucketName) .delete(Delete.builder().objects(list.stream().map(key -> ObjectIdentifier.builder().key(key).build()).collect(toImmutableList())).build()) + .overrideConfiguration(disableStrongIntegrityChecksums()) .build(); return toListenableFuture(s3AsyncClient.deleteObjects(request)); }).collect(toImmutableList()))); @@ -906,4 +909,14 @@ private ListenableFuture abortMultipartUpload(Stri return stats.getAbortMultipartUpload().record(toListenableFuture(s3AsyncClient.abortMultipartUpload(abortMultipartUploadRequest))); } } + + // TODO (https://github.com/trinodb/trino/issues/24955): + // remove me once all of the S3-compatible storage support strong integrity checks + @SuppressWarnings("deprecation") + static AwsRequestOverrideConfiguration disableStrongIntegrityChecksums() + { + return AwsRequestOverrideConfiguration.builder() + .signer(AwsS3V4Signer.create()) + .build(); + } } diff --git a/plugin/trino-exchange-hdfs/pom.xml b/plugin/trino-exchange-hdfs/pom.xml index f792560a0269..69d9269732f6 100644 --- a/plugin/trino-exchange-hdfs/pom.xml +++ b/plugin/trino-exchange-hdfs/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/ExchangeHdfsConfig.java b/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/ExchangeHdfsConfig.java index 092ba2688056..165bbce9519b 100644 --- a/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/ExchangeHdfsConfig.java +++ b/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/ExchangeHdfsConfig.java @@ -31,6 +31,7 @@ public 
class ExchangeHdfsConfig { private DataSize hdfsStorageBlockSize = DataSize.of(4, MEGABYTE); + private boolean skipDirectorySchemeValidation; private List resourceConfigFiles = ImmutableList.of(); @NotNull @@ -49,6 +50,19 @@ public ExchangeHdfsConfig setHdfsStorageBlockSize(DataSize hdfsStorageBlockSize) return this; } + public boolean isSkipDirectorySchemeValidation() + { + return skipDirectorySchemeValidation; + } + + @Config("exchange.hdfs.skip-directory-scheme-validation") + @ConfigDescription("Skip directory scheme validation to support hadoop compatible file system") + public ExchangeHdfsConfig setSkipDirectorySchemeValidation(boolean skipDirectorySchemeValidation) + { + this.skipDirectorySchemeValidation = skipDirectorySchemeValidation; + return this; + } + @NotNull public List<@FileExists File> getResourceConfigFiles() { diff --git a/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/HdfsExchangeModule.java b/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/HdfsExchangeModule.java index 5c30087c0b73..9c179f47628e 100644 --- a/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/HdfsExchangeModule.java +++ b/plugin/trino-exchange-hdfs/src/main/java/io/trino/plugin/exchange/hdfs/HdfsExchangeModule.java @@ -25,7 +25,6 @@ import java.net.URI; import java.util.List; -import static io.airlift.configuration.ConfigBinder.configBinder; import static io.trino.spi.StandardErrorCode.CONFIGURATION_INVALID; import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; import static java.lang.String.format; @@ -47,10 +46,13 @@ protected void setup(Binder binder) binder.addError(new TrinoException(CONFIGURATION_INVALID, "Multiple schemes in exchange base directories")); return; } + String scheme = baseDirectories.get(0).getScheme(); - if (scheme.equalsIgnoreCase("hdfs")) { + + boolean skipDirectorySchemeValidation = buildConfigObject(ExchangeHdfsConfig.class).isSkipDirectorySchemeValidation(); + + if 
(scheme.equalsIgnoreCase("hdfs") || skipDirectorySchemeValidation) { binder.bind(FileSystemExchangeStorage.class).to(HadoopFileSystemExchangeStorage.class).in(Scopes.SINGLETON); - configBinder(binder).bindConfig(ExchangeHdfsConfig.class); } else { binder.addError(new TrinoException(NOT_SUPPORTED, diff --git a/plugin/trino-exchange-hdfs/src/test/java/io/trino/plugin/exchange/hdfs/TestExchangeHdfsConfig.java b/plugin/trino-exchange-hdfs/src/test/java/io/trino/plugin/exchange/hdfs/TestExchangeHdfsConfig.java index 190258e93c81..42ea955bc448 100644 --- a/plugin/trino-exchange-hdfs/src/test/java/io/trino/plugin/exchange/hdfs/TestExchangeHdfsConfig.java +++ b/plugin/trino-exchange-hdfs/src/test/java/io/trino/plugin/exchange/hdfs/TestExchangeHdfsConfig.java @@ -35,7 +35,8 @@ public void testDefaults() { assertRecordedDefaults(recordDefaults(ExchangeHdfsConfig.class) .setResourceConfigFiles(ImmutableList.of()) - .setHdfsStorageBlockSize(DataSize.of(4, MEGABYTE))); + .setHdfsStorageBlockSize(DataSize.of(4, MEGABYTE)) + .setSkipDirectorySchemeValidation(false)); } @Test @@ -48,11 +49,13 @@ public void testExplicitPropertyMappings() Map properties = ImmutableMap.builder() .put("hdfs.config.resources", resource1 + "," + resource2) .put("exchange.hdfs.block-size", "8MB") + .put("exchange.hdfs.skip-directory-scheme-validation", "true") .buildOrThrow(); ExchangeHdfsConfig expected = new ExchangeHdfsConfig() .setResourceConfigFiles(ImmutableList.of(resource1.toString(), resource2.toString())) - .setHdfsStorageBlockSize(DataSize.of(8, MEGABYTE)); + .setHdfsStorageBlockSize(DataSize.of(8, MEGABYTE)) + .setSkipDirectorySchemeValidation(true); assertFullMapping(properties, expected); } diff --git a/plugin/trino-faker/pom.xml b/plugin/trino-faker/pom.xml index 040980f907e6..c499010be129 100644 --- a/plugin/trino-faker/pom.xml +++ b/plugin/trino-faker/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -142,6 +142,12 @@ test + + io.trino + 
trino-exchange-filesystem + test + + io.trino trino-main diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerColumnHandle.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerColumnHandle.java index 4b81b75858ca..043d9256fa4f 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerColumnHandle.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerColumnHandle.java @@ -14,8 +14,6 @@ package io.trino.plugin.faker; -import com.google.common.collect.ImmutableList; -import io.airlift.units.Duration; import io.trino.spi.TrinoException; import io.trino.spi.connector.ColumnHandle; import io.trino.spi.connector.ColumnMetadata; @@ -32,17 +30,15 @@ import io.trino.spi.type.VarcharType; import java.util.Collection; -import java.util.List; -import java.util.concurrent.TimeUnit; import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.ImmutableList.toImmutableList; import static io.trino.plugin.faker.ColumnInfo.ALLOWED_VALUES_PROPERTY; import static io.trino.plugin.faker.ColumnInfo.GENERATOR_PROPERTY; import static io.trino.plugin.faker.ColumnInfo.MAX_PROPERTY; import static io.trino.plugin.faker.ColumnInfo.MIN_PROPERTY; import static io.trino.plugin.faker.ColumnInfo.NULL_PROBABILITY_PROPERTY; import static io.trino.plugin.faker.ColumnInfo.STEP_PROPERTY; +import static io.trino.plugin.faker.PropertyValues.propertyValue; import static io.trino.spi.StandardErrorCode.INVALID_COLUMN_PROPERTY; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.DateType.DATE; @@ -86,20 +82,13 @@ public static FakerColumnHandle of(int columnId, ColumnMetadata column, double d } domain = Domain.create(ValueSet.ofRanges(range(column.getType(), min, max)), false); } - if (column.getProperties().containsKey(ALLOWED_VALUES_PROPERTY)) { + Object allowedValues = propertyValue(column, ALLOWED_VALUES_PROPERTY); + + if (allowedValues != null) { if (min != null || max 
!= null || generator != null) { throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property cannot be set together with `%s`, `%s`, and `%s` properties".formatted(ALLOWED_VALUES_PROPERTY, MIN_PROPERTY, MAX_PROPERTY, GENERATOR_PROPERTY)); } - ImmutableList.Builder builder = ImmutableList.builder(); - for (String value : strings((List) column.getProperties().get(ALLOWED_VALUES_PROPERTY))) { - try { - builder.add(Literal.parse(value, column.getType())); - } - catch (IllegalArgumentException | ClassCastException e) { - throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property must only contain valid %s literals, failed to parse `%s`".formatted(ALLOWED_VALUES_PROPERTY, column.getType().getDisplayName(), value), e); - } - } - domain = Domain.create(ValueSet.copyOf(column.getType(), builder.build()), false); + domain = Domain.create(ValueSet.copyOf(column.getType(), (Collection) allowedValues), false); } return new FakerColumnHandle( @@ -117,40 +106,21 @@ private static boolean isCharacterColumn(ColumnMetadata column) return column.getType() instanceof CharType || column.getType() instanceof VarcharType || column.getType() instanceof VarbinaryType; } - private static Object propertyValue(ColumnMetadata column, String property) - { - try { - return Literal.parse((String) column.getProperties().get(property), column.getType()); - } - catch (IllegalArgumentException e) { - throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property must be a valid %s literal".formatted(property, column.getType().getDisplayName()), e); - } - } - private static ValueSet stepValue(ColumnMetadata column) { Type type = column.getType(); - String rawStep = (String) column.getProperties().get(STEP_PROPERTY); - if (rawStep == null) { + Object step = propertyValue(column, STEP_PROPERTY); + if (step == null) { return ValueSet.none(type); } if (isCharacterColumn(column)) { throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property cannot be set for CHAR, VARCHAR 
or VARBINARY columns".formatted(STEP_PROPERTY)); } - if (DATE.equals(column.getType()) || type instanceof TimestampType || type instanceof TimestampWithTimeZoneType || type instanceof TimeType || type instanceof TimeWithTimeZoneType) { - try { - return ValueSet.of(BIGINT, Duration.valueOf(rawStep).roundTo(TimeUnit.NANOSECONDS)); - } - catch (IllegalArgumentException e) { - throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property for a %s column must be a valid duration literal".formatted(STEP_PROPERTY, column.getType().getDisplayName()), e); - } - } - try { - return ValueSet.of(type, Literal.parse(rawStep, type)); - } - catch (IllegalArgumentException e) { - throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property for a %s column must be a valid %s literal".formatted(STEP_PROPERTY, column.getType().getDisplayName(), type.getDisplayName()), e); + Type stepType = type; + if (DATE.equals(type) || type instanceof TimestampType || type instanceof TimestampWithTimeZoneType || type instanceof TimeType || type instanceof TimeWithTimeZoneType) { + stepType = BIGINT; } + return ValueSet.of(stepType, step); } private static Range range(Type type, Object min, Object max) @@ -168,13 +138,6 @@ private static Range range(Type type, Object min, Object max) return Range.range(type, min, true, max, true); } - private static List strings(Collection values) - { - return values.stream() - .map(String.class::cast) - .collect(toImmutableList()); - } - public FakerColumnHandle withNullProbability(double nullProbability) { return new FakerColumnHandle(columnIndex, name, type, nullProbability, generator, domain, step); diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConfig.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConfig.java index 070a1e5753d7..06a651870ebb 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConfig.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConfig.java 
@@ -80,7 +80,7 @@ public boolean isSequenceDetectionEnabled() @ConfigDescription( """ If true, when creating a table using existing data, columns with the number of distinct values close to - the number of rows will be treated as sequences""") + the number of rows are treated as sequences""") public FakerConfig setSequenceDetectionEnabled(boolean value) { this.sequenceDetectionEnabled = value; @@ -96,7 +96,7 @@ public boolean isDictionaryDetectionEnabled() @ConfigDescription( """ If true, when creating a table using existing data, columns with a low number of distinct values - will have the allowed_values column property populated with random values""") + are treated as dictionaries, and get the allowed_values column property populated with random values""") public FakerConfig setDictionaryDetectionEnabled(boolean value) { this.dictionaryDetectionEnabled = value; diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConnector.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConnector.java index 322fbfa006ea..06039bcb5d03 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConnector.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerConnector.java @@ -131,14 +131,14 @@ public List> getSchemaProperties() SchemaInfo.SEQUENCE_DETECTION_ENABLED, """ If true, when creating a table using existing data, columns with the number of distinct values close to - the number of rows will be treated as sequences""", + the number of rows are treated as sequences""", null, false), booleanProperty( SchemaInfo.DICTIONARY_DETECTION_ENABLED, """ If true, when creating a table using existing data, columns with a low number of distinct values - will have the allowed_values column property populated with random values""", + are treated as dictionaries, and get the allowed_values column property populated with random values""", null, false)); } @@ -163,14 +163,14 @@ public List> getTableProperties() 
TableInfo.SEQUENCE_DETECTION_ENABLED, """ If true, when creating a table using existing data, columns with the number of distinct values close to - the number of rows will be treated as sequences""", + the number of rows are treated as sequences""", null, false), booleanProperty( TableInfo.DICTIONARY_DETECTION_ENABLED, """ If true, when creating a table using existing data, columns with a low number of distinct values - will have the allowed_values column property populated with random values""", + are treated as dictionaries, and get the allowed_values column property populated with random values""", null, false)); } diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerMetadata.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerMetadata.java index a8ae2f291fd7..f3b1193fe45e 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerMetadata.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerMetadata.java @@ -49,7 +49,10 @@ import io.trino.spi.predicate.ValueSet; import io.trino.spi.security.TrinoPrincipal; import io.trino.spi.statistics.ColumnStatisticMetadata; +import io.trino.spi.statistics.ColumnStatistics; import io.trino.spi.statistics.ComputedStatistics; +import io.trino.spi.statistics.Estimate; +import io.trino.spi.statistics.TableStatistics; import io.trino.spi.statistics.TableStatisticsMetadata; import io.trino.spi.type.CharType; import io.trino.spi.type.Type; @@ -395,9 +398,9 @@ public synchronized FakerOutputTableHandle beginCreateTable(ConnectorSession ses return new FakerOutputTableHandle(tableName); } - private static boolean isNotRangeType(Type type) + private static boolean isRangeType(Type type) { - return type instanceof CharType || type instanceof VarcharType || type instanceof VarbinaryType; + return !(type instanceof CharType || type instanceof VarcharType || type instanceof VarbinaryType); } private static boolean isSequenceType(Type type) @@ -513,7 +516,7 @@ private 
synchronized TableInfo createTableInfoFromStats(SchemaTableName tableNam private static ColumnInfo createColumnInfoFromStats(ColumnInfo column, Object min, Object max, long distinctValues, Optional nonNullValues, long rowCount, boolean isSequenceDetectionEnabled, List allowedValues) { - if (isNotRangeType(column.type())) { + if (!isRangeType(column.type())) { return column; } FakerColumnHandle handle = column.handle(); @@ -560,6 +563,7 @@ private Map> getColumnValues(SchemaTableName tableName, Tab .filter(entry -> entry.getValue() <= MAX_DICTIONARY_SIZE) .map(entry -> columnHandles.get(entry.getKey())) .filter(Objects::nonNull) + .filter(column -> isRangeType(column.type())) .map(column -> !minimums.containsKey(column.name()) ? column : column.withDomain(Domain.create(ValueSet.ofRanges(Range.range( column.type(), minimums.get(column.name()), @@ -567,6 +571,9 @@ private Map> getColumnValues(SchemaTableName tableName, Tab maximums.get(column.name()), true)), false))) .collect(toImmutableList()); + if (dictionaryColumns.isEmpty()) { + return ImmutableMap.of(); + } ImmutableMap.Builder> columnValues = ImmutableMap.builder(); try (FakerPageSource pageSource = new FakerPageSource(faker, random, dictionaryColumns, 0, MAX_DICTIONARY_SIZE * 2)) { Page page = null; @@ -759,4 +766,41 @@ public FunctionDependencyDeclaration getFunctionDependencies(ConnectorSession se { return FunctionDependencyDeclaration.NO_DEPENDENCIES; } + + @Override + public synchronized TableStatistics getTableStatistics(ConnectorSession session, ConnectorTableHandle tableHandle) + { + FakerTableHandle fakerTableHandle = (FakerTableHandle) tableHandle; + TableInfo info = tables.get(fakerTableHandle.schemaTableName()); + + TableStatistics.Builder tableStatisitics = TableStatistics.builder(); + tableStatisitics.setRowCount(Estimate.of(fakerTableHandle.limit())); + + info.columns().forEach(columnInfo -> { + Object min = PropertyValues.propertyValue(columnInfo.metadata(), MIN_PROPERTY); + Object max = 
PropertyValues.propertyValue(columnInfo.metadata(), MAX_PROPERTY); + Object step = PropertyValues.propertyValue(columnInfo.metadata(), STEP_PROPERTY); + Collection allowedValues = (Collection) columnInfo.metadata().getProperties().get(ALLOWED_VALUES_PROPERTY); // skip parsing as we don't need the values + + checkState(allowedValues == null || (min == null && max == null), "The `%s` property cannot be set together with `%s` and `%s` properties".formatted(ALLOWED_VALUES_PROPERTY, MIN_PROPERTY, MAX_PROPERTY)); + + ColumnStatistics.Builder columnStatistics = ColumnStatistics.builder(); + if (allowedValues != null) { + columnStatistics.setDistinctValuesCount(Estimate.of(allowedValues.size())); + } + else { + Type type = columnInfo.metadata().getType(); + if (min != null && max != null && type.getJavaType() == long.class) { + long distinctValuesCount = (long) max - (long) min; + if (step != null) { + distinctValuesCount = distinctValuesCount / (long) step; + } + columnStatistics.setDistinctValuesCount(Estimate.of(distinctValuesCount)); + } + } + columnStatistics.setNullsFraction(Estimate.of(columnInfo.handle().nullProbability())); + tableStatisitics.setColumnStatistics(columnInfo.handle(), columnStatistics.build()); + }); + return tableStatisitics.build(); + } } diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerSplit.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerSplit.java index b1bb921830f7..c8cd5bd61e80 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerSplit.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerSplit.java @@ -16,14 +16,23 @@ import io.trino.spi.connector.ConnectorSplit; import static com.google.common.base.Preconditions.checkArgument; +import static io.airlift.slice.SizeOf.instanceSize; public record FakerSplit(long splitNumber, long rowsOffset, long rowsCount) implements ConnectorSplit { + private static final int INSTANCE_SIZE = instanceSize(FakerSplit.class); + 
public FakerSplit { checkArgument(splitNumber >= 0, "splitNumber is negative"); checkArgument(rowsOffset >= 0, "rowsOffset is negative"); checkArgument(rowsCount >= 0, "rowsCount is negative"); } + + @Override + public long getRetainedSizeInBytes() + { + return INSTANCE_SIZE; + } } diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/PropertyValues.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/PropertyValues.java new file mode 100644 index 000000000000..00200bd1a397 --- /dev/null +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/PropertyValues.java @@ -0,0 +1,84 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.faker; + +import io.airlift.units.Duration; +import io.trino.spi.TrinoException; +import io.trino.spi.connector.ColumnMetadata; +import io.trino.spi.type.TimeType; +import io.trino.spi.type.TimeWithTimeZoneType; +import io.trino.spi.type.TimestampType; +import io.trino.spi.type.TimestampWithTimeZoneType; +import io.trino.spi.type.Type; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static io.trino.spi.StandardErrorCode.INVALID_COLUMN_PROPERTY; +import static io.trino.spi.type.DateType.DATE; + +public class PropertyValues +{ + private PropertyValues() {} + + public static Object propertyValue(ColumnMetadata column, String property) + { + Object propertyValue = column.getProperties().get(property); + if (propertyValue == null) { + return null; + } + + if (propertyValue instanceof Collection propertyValues) { + return propertyValues.stream() + .map(String.class::cast) + .map(value -> { + try { + return Literal.parse(value, column.getType()); + } + catch (IllegalArgumentException | ClassCastException e) { + throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property must only contain valid %s literals, failed to parse `%s`".formatted(property, column.getType().getDisplayName(), value), e); + } + }) + .collect(toImmutableList()); + } + + if (property.equals(ColumnInfo.STEP_PROPERTY)) { + Type type = column.getType(); + if (DATE.equals(type) || type instanceof TimestampType || type instanceof TimestampWithTimeZoneType || type instanceof TimeType || type instanceof TimeWithTimeZoneType) { + try { + return Duration.valueOf((String) propertyValue).roundTo(TimeUnit.NANOSECONDS); + } + catch (IllegalArgumentException e) { + throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property for a %s column must be a valid duration literal".formatted(property, type.getDisplayName()), e); + } + } + } + + try { 
+ return Literal.parse((String) propertyValue, column.getType()); + } + catch (IllegalArgumentException e) { + throw new TrinoException(INVALID_COLUMN_PROPERTY, "The `%s` property must be a valid %s literal".formatted(property, column.getType().getDisplayName()), e); + } + } + + private static List strings(Collection values) + { + return values.stream() + .map(String.class::cast) + .collect(toImmutableList()); + } +} diff --git a/plugin/trino-faker/src/test/java/io/trino/plugin/faker/FakerQueryRunner.java b/plugin/trino-faker/src/test/java/io/trino/plugin/faker/FakerQueryRunner.java index f591c4ddff7c..50e7e7c40558 100644 --- a/plugin/trino-faker/src/test/java/io/trino/plugin/faker/FakerQueryRunner.java +++ b/plugin/trino-faker/src/test/java/io/trino/plugin/faker/FakerQueryRunner.java @@ -17,14 +17,17 @@ import io.airlift.log.Level; import io.airlift.log.Logger; import io.airlift.log.Logging; +import io.trino.plugin.exchange.filesystem.FileSystemExchangePlugin; import io.trino.plugin.tpch.TpchPlugin; import io.trino.testing.DistributedQueryRunner; import io.trino.testing.QueryRunner; +import java.io.File; import java.util.Map; import static io.airlift.testing.Closeables.closeAllSuppress; import static io.trino.testing.TestingSession.testSessionBuilder; +import static java.nio.file.Files.createTempDirectory; import static java.util.Objects.requireNonNullElse; public class FakerQueryRunner @@ -95,4 +98,37 @@ public static void main(String[] args) log.info("======== SERVER STARTED ========"); log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl()); } + + public static final class FakerQueryRunnerWithTaskRetries + { + private FakerQueryRunnerWithTaskRetries() {} + + public static void main(String[] args) + throws Exception + { + Logger log = Logger.get(FakerQueryRunnerWithTaskRetries.class); + + File exchangeManagerDirectory = createTempDirectory("exchange_manager").toFile(); + Map exchangeManagerProperties = ImmutableMap.builder() + 
.put("exchange.base-directories", exchangeManagerDirectory.getAbsolutePath()) + .buildOrThrow(); + exchangeManagerDirectory.deleteOnExit(); + + @SuppressWarnings("resource") + QueryRunner queryRunner = builder() + .setExtraProperties(ImmutableMap.builder() + .put("http-server.http.port", requireNonNullElse(System.getenv("TRINO_PORT"), "8080")) + .put("retry-policy", "TASK") + .put("fault-tolerant-execution-task-memory", "1GB") + .buildOrThrow()) + .setAdditionalSetup(runner -> { + runner.installPlugin(new FileSystemExchangePlugin()); + runner.loadExchangeManager("filesystem", exchangeManagerProperties); + }) + .build(); + + log.info("======== SERVER STARTED ========"); + log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl()); + } + } } diff --git a/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java b/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java index 643c04674b06..3e2096c23d04 100644 --- a/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java +++ b/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java @@ -592,4 +592,24 @@ FROM TABLE(sequence(start => 0, stop => 1000, step => 1)) assertThat(createTable).containsPattern("nullable integer WITH \\(null_probability = 1E0\\)"); } } + + @Test + void testCreateTableAsSelectVarchar() + { + String source = """ + SELECT * FROM tpch.tiny.orders + """; + try (TestTable table = new TestTable(getQueryRunner()::execute, "varchars", "AS " + source)) { + String createTable = (String) computeScalar("SHOW CREATE TABLE " + table.getName()); + assertThat(createTable).containsPattern("orderkey bigint WITH \\(max = '60000', min = '1', null_probability = 0E0, step = '1'\\)"); + assertThat(createTable).containsPattern("custkey bigint WITH \\(allowed_values = ARRAY\\['.*'], null_probability = 0E0\\)"); + assertThat(createTable).containsPattern("orderstatus varchar\\(1\\)"); + 
assertThat(createTable).containsPattern("totalprice double WITH \\(max = '.*', min = '.*', null_probability = 0E0\\)"); + assertThat(createTable).containsPattern("orderdate date WITH \\(max = '1998-08-02', min = '1992-01-01', null_probability = 0E0\\)"); + assertThat(createTable).containsPattern("orderpriority varchar\\(15\\)"); + assertThat(createTable).containsPattern("clerk varchar\\(15\\)"); + assertThat(createTable).containsPattern("shippriority integer WITH \\(allowed_values = ARRAY\\['0'], null_probability = 0E0\\)"); + assertThat(createTable).containsPattern("comment varchar\\(79\\)"); + } + } } diff --git a/plugin/trino-functions-python/pom.xml b/plugin/trino-functions-python/pom.xml index dfa50b77062e..db604859b6d4 100644 --- a/plugin/trino-functions-python/pom.xml +++ b/plugin/trino-functions-python/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-geospatial/pom.xml b/plugin/trino-geospatial/pom.xml index c6a55007dfbd..66cf3675b536 100644 --- a/plugin/trino-geospatial/pom.xml +++ b/plugin/trino-geospatial/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-google-sheets/pom.xml b/plugin/trino-google-sheets/pom.xml index 43c91666c17a..4a81066eeb66 100644 --- a/plugin/trino-google-sheets/pom.xml +++ b/plugin/trino-google-sheets/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-hive/pom.xml b/plugin/trino-hive/pom.xml index bf6ad8289bce..361542ba541f 100644 --- a/plugin/trino-hive/pom.xml +++ b/plugin/trino-hive/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConfig.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConfig.java index 5b392f4bf14f..aae4a95efef9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConfig.java +++ 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConfig.java @@ -1255,11 +1255,13 @@ public HiveConfig setPartitionProjectionEnabled(boolean enabledAthenaPartitionPr return this; } + @Deprecated public S3StorageClassFilter getS3StorageClassFilter() { return s3StorageClassFilter; } + @Deprecated @Config("hive.s3.storage-class-filter") @ConfigDescription("Filter based on storage class of S3 object") public HiveConfig setS3StorageClassFilter(S3StorageClassFilter s3StorageClassFilter) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SparkMetastoreUtil.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SparkMetastoreUtil.java index a8f28eb3cf45..05532e0b4afc 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SparkMetastoreUtil.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SparkMetastoreUtil.java @@ -137,7 +137,7 @@ static HiveColumnStatistics fromMetastoreColumnStatistics(String columnName, Hiv toDecimal(parameters.get(field + COLUMN_MAX)), nullsCount, distinctValuesWithNullCount); - case TIMESTAMPLOCALTZ, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME, VOID, UNKNOWN -> HiveColumnStatistics.empty(); + case TIMESTAMPLOCALTZ, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME, VARIANT, VOID, UNKNOWN -> HiveColumnStatistics.empty(); }; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java index e36dacc6c04a..03785bde62f2 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java @@ -40,6 +40,8 @@ import io.trino.spi.TrinoException; import io.trino.spi.function.LanguageFunction; import io.trino.spi.security.PrincipalType; +import jakarta.annotation.Nullable; +import 
org.gaul.modernizer_maven_annotations.SuppressModernizer; import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.services.glue.model.BinaryColumnStatisticsData; import software.amazon.awssdk.services.glue.model.BooleanColumnStatisticsData; @@ -88,6 +90,7 @@ import static io.trino.metastore.Table.TABLE_COMMENT; import static io.trino.plugin.hive.HiveErrorCode.HIVE_INVALID_METADATA; import static io.trino.plugin.hive.HiveErrorCode.HIVE_UNSUPPORTED_FORMAT; +import static io.trino.plugin.hive.TableType.EXTERNAL_TABLE; import static io.trino.plugin.hive.ViewReaderUtil.isTrinoMaterializedView; import static io.trino.plugin.hive.ViewReaderUtil.isTrinoView; import static io.trino.plugin.hive.metastore.MetastoreUtil.metastoreFunctionName; @@ -117,6 +120,19 @@ final class GlueConverter private GlueConverter() {} + public static String getTableType(software.amazon.awssdk.services.glue.model.Table glueTable) + { + // Athena treats a missing table type as EXTERNAL_TABLE. + return firstNonNull(getTableTypeNullable(glueTable), EXTERNAL_TABLE.name()); + } + + @Nullable + @SuppressModernizer // Usage of `Table.tableType` is not allowed. Only this method can call that. + public static String getTableTypeNullable(software.amazon.awssdk.services.glue.model.Table glueTable) + { + return glueTable.tableType(); + } + public static Database fromGlueDatabase(software.amazon.awssdk.services.glue.model.Database glueDb) { return new Database( @@ -140,8 +156,7 @@ public static DatabaseInput toGlueDatabaseInput(Database database) public static Table fromGlueTable(software.amazon.awssdk.services.glue.model.Table glueTable, String databaseName) { - // Athena treats a missing table type as EXTERNAL_TABLE. 
- String tableType = firstNonNull(glueTable.tableType(), "EXTERNAL_TABLE"); + String tableType = getTableType(glueTable); Map tableParameters = glueTable.parameters(); if (glueTable.description() != null) { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java index a7103e4326cc..6d428eaac57f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java @@ -122,6 +122,7 @@ import static io.trino.plugin.hive.metastore.MetastoreUtil.toPartitionName; import static io.trino.plugin.hive.metastore.MetastoreUtil.updateStatisticsParameters; import static io.trino.plugin.hive.metastore.glue.GlueConverter.fromGlueStatistics; +import static io.trino.plugin.hive.metastore.glue.GlueConverter.getTableTypeNullable; import static io.trino.plugin.hive.metastore.glue.GlueConverter.toGlueColumnStatistics; import static io.trino.plugin.hive.metastore.glue.GlueConverter.toGlueDatabaseInput; import static io.trino.plugin.hive.metastore.glue.GlueConverter.toGlueFunctionInput; @@ -448,7 +449,7 @@ private List getTablesInternal(Consumer cacheTable, String dat return glueTables.stream() .map(table -> new TableInfo( new SchemaTableName(databaseName, table.name()), - TableInfo.ExtendedRelationType.fromTableTypeAndComment(table.tableType(), table.parameters().get(TABLE_COMMENT)))) + TableInfo.ExtendedRelationType.fromTableTypeAndComment(GlueConverter.getTableType(table), table.parameters().get(TABLE_COMMENT)))) .toList(); } catch (EntityNotFoundException _) { @@ -716,7 +717,7 @@ public static TableInput.Builder asTableInputBuilder(software.amazon.awssdk.serv .partitionKeys(table.partitionKeys()) .viewOriginalText(table.viewOriginalText()) .viewExpandedText(table.viewExpandedText()) - .tableType(table.tableType()) + 
.tableType(getTableTypeNullable(table)) .targetTable(table.targetTable()) .parameters(table.parameters()); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreUtil.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreUtil.java index 1e377956c03d..6ca070dcdca9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreUtil.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreUtil.java @@ -789,6 +789,7 @@ public static ColumnStatisticsObj createMetastoreColumnStatistics(String columnN case TIMESTAMPLOCALTZ: case INTERVAL_YEAR_MONTH: case INTERVAL_DAY_TIME: + case VARIANT: // TODO support these, when we add support for these Hive types case VOID: case UNKNOWN: diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV1.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV1.java index a1ee3ced114a..8cf3ddbce48b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV1.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV1.java @@ -186,6 +186,7 @@ private static int hash(TypeInfo type, Object value) case TIMESTAMPLOCALTZ: case INTERVAL_YEAR_MONTH: case INTERVAL_DAY_TIME: + case VARIANT: // TODO break; case VOID: diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV2.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV2.java index 6989489b2707..c8ae02fc6461 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV2.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveBucketingV2.java @@ -188,6 +188,7 @@ private static int hash(TypeInfo type, Object value) case TIMESTAMPLOCALTZ: case INTERVAL_YEAR_MONTH: case INTERVAL_DAY_TIME: + case VARIANT: // TODO break; case VOID: diff --git 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveTypeUtil.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveTypeUtil.java index 1dd0c9b7aeea..c7ca54693a58 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveTypeUtil.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveTypeUtil.java @@ -107,6 +107,7 @@ private static boolean typeSupported(PrimitiveCategory category) case INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME, VOID, + VARIANT, UNKNOWN -> false; }; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveWriteUtils.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveWriteUtils.java index fc6cf307e54a..3cd9b2619016 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveWriteUtils.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/util/HiveWriteUtils.java @@ -304,6 +304,7 @@ private static boolean isWritablePrimitiveType(PrimitiveCategory primitiveCatego case TIMESTAMPLOCALTZ: case INTERVAL_YEAR_MONTH: case INTERVAL_DAY_TIME: + case VARIANT: case UNKNOWN: // unsupported for writing break; diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java index 3961df684989..3b53186f5c20 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java @@ -597,11 +597,11 @@ public void testInvalidValueForQueryPartitionFilterRequiredSchemas() { assertQueryFails( "SET SESSION hive.query_partition_filter_required_schemas = ARRAY['tpch', null]", - "line 1:1: Invalid null or empty value in query_partition_filter_required_schemas property"); + "line 1:60: Invalid null or empty value in query_partition_filter_required_schemas property"); assertQueryFails( "SET SESSION hive.query_partition_filter_required_schemas = 
ARRAY['tpch', '']", - "line 1:1: Invalid null or empty value in query_partition_filter_required_schemas property"); + "line 1:60: Invalid null or empty value in query_partition_filter_required_schemas property"); } @Test diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveGlueMetadataListing.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveGlueMetadataListing.java index 42fc5ff5f07d..b1c13d26b132 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveGlueMetadataListing.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveGlueMetadataListing.java @@ -32,6 +32,8 @@ import software.amazon.awssdk.services.glue.model.TableInput; import java.nio.file.Path; +import java.util.List; +import java.util.Set; import static io.trino.plugin.hive.metastore.glue.GlueMetastoreModule.createGlueClient; import static io.trino.plugin.hive.metastore.glue.TestingGlueHiveMetastore.createTestingGlueHiveMetastore; @@ -46,6 +48,7 @@ public class TestHiveGlueMetadataListing extends AbstractTestQueryFramework { public static final String FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME = "failing_table_with_null_storage_descriptor"; + public static final String FAILING_TABLE_WITH_NULL_TYPE = "failing_table_with_null_type"; private static final Logger LOG = Logger.get(TestHiveGlueMetadataListing.class); private static final String HIVE_CATALOG = "hive"; private final String tpchSchema = "test_tpch_schema_" + randomNameSuffix(); @@ -75,7 +78,7 @@ protected QueryRunner createQueryRunner() queryRunner.execute("CREATE SCHEMA " + tpchSchema + " WITH (location = '" + dataDirectory.toUri() + "')"); copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, hiveSession, ImmutableList.of(TpchTable.REGION, TpchTable.NATION)); - createBrokenTable(dataDirectory); + createBrokenTables(dataDirectory); return queryRunner; } @@ -98,43 +101,54 @@ public void cleanup() @Test public void testReadInformationSchema() { - String expectedTables 
= format("VALUES '%s', '%s', '%s'", TpchTable.REGION.getTableName(), TpchTable.NATION.getTableName(), FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME); - - assertThat(query("SELECT table_name FROM hive.information_schema.tables")) - .skippingTypesCheck() - .containsAll(expectedTables); - assertThat(query("SELECT table_name FROM hive.information_schema.tables WHERE table_schema='" + tpchSchema + "'")) - .skippingTypesCheck() - .matches(expectedTables); - assertThat(query("SELECT table_name FROM hive.information_schema.tables WHERE table_name = 'region' AND table_schema='" + tpchSchema + "'")) - .skippingTypesCheck() - .matches("VALUES 'region'"); + Set expectedTables = ImmutableSet.builder() + .add(TpchTable.REGION.getTableName()) + .add(TpchTable.NATION.getTableName()) + .add(FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME) + .add(FAILING_TABLE_WITH_NULL_TYPE) + .build(); + + assertThat(computeActual("SELECT table_name FROM hive.information_schema.tables").getOnlyColumnAsSet()).containsAll(expectedTables); + assertThat(computeActual("SELECT table_name FROM hive.information_schema.tables WHERE table_schema='" + tpchSchema + "'").getOnlyColumnAsSet()).containsAll(expectedTables); + assertThat(computeScalar("SELECT table_name FROM hive.information_schema.tables WHERE table_name = 'region' AND table_schema='" + tpchSchema + "'")) + .isEqualTo(TpchTable.REGION.getTableName()); assertQueryReturnsEmptyResult(format("SELECT table_name FROM hive.information_schema.tables WHERE table_name = '%s' AND table_schema='%s'", FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME, tpchSchema)); + assertQueryReturnsEmptyResult(format("SELECT table_name FROM hive.information_schema.tables WHERE table_name = '%s' AND table_schema='%s'", FAILING_TABLE_WITH_NULL_TYPE, tpchSchema)); assertQuery("SELECT table_name, column_name from hive.information_schema.columns WHERE table_schema = '" + tpchSchema + "'", "VALUES ('region', 'regionkey'), ('region', 'name'), ('region', 'comment'), ('nation', 
'nationkey'), ('nation', 'name'), ('nation', 'regionkey'), ('nation', 'comment')"); assertQuery("SELECT table_name, column_name from hive.information_schema.columns WHERE table_name = 'region' AND table_schema='" + tpchSchema + "'", "VALUES ('region', 'regionkey'), ('region', 'name'), ('region', 'comment')"); assertQueryReturnsEmptyResult(format("SELECT table_name FROM hive.information_schema.columns WHERE table_name = '%s' AND table_schema='%s'", FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME, tpchSchema)); + assertQueryReturnsEmptyResult(format("SELECT table_name FROM hive.information_schema.columns WHERE table_name = '%s' AND table_schema='%s'", FAILING_TABLE_WITH_NULL_TYPE, tpchSchema)); + + assertThat(computeActual("SHOW TABLES FROM hive." + tpchSchema).getOnlyColumnAsSet()).isEqualTo(expectedTables); + } - assertQuery("SHOW TABLES FROM hive." + tpchSchema, expectedTables); + private void createBrokenTables(Path dataDirectory) + { + TableInput nullStorageTable = TableInput.builder() + .name(FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME) + .tableType("HIVE") + .build(); + TableInput nullTypeTable = TableInput.builder() + .name(FAILING_TABLE_WITH_NULL_TYPE) + .build(); + createBrokenTable(List.of(nullStorageTable, nullTypeTable), dataDirectory); } - private void createBrokenTable(Path dataDirectory) + private void createBrokenTable(List tablesInput, Path dataDirectory) { GlueHiveMetastoreConfig glueConfig = new GlueHiveMetastoreConfig() .setDefaultWarehouseDir(dataDirectory.toString()); try (GlueClient glueClient = createGlueClient(glueConfig, ImmutableSet.of())) { - TableInput tableInput = TableInput.builder() - .name(FAILING_TABLE_WITH_NULL_STORAGE_DESCRIPTOR_NAME) - .tableType("HIVE") - .build(); - - CreateTableRequest createTableRequest = CreateTableRequest.builder() - .databaseName(tpchSchema) - .tableInput(tableInput) - .build(); - glueClient.createTable(createTableRequest); + for (TableInput tableInput : tablesInput) { + CreateTableRequest 
createTableRequest = CreateTableRequest.builder() + .databaseName(tpchSchema) + .tableInput(tableInput) + .build(); + glueClient.createTable(createTableRequest); + } } } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java index 4eda8e552254..08e47220ba2f 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java @@ -47,6 +47,7 @@ import static io.trino.plugin.hive.TableType.EXTERNAL_TABLE; import static io.trino.plugin.hive.ViewReaderUtil.PRESTO_VIEW_FLAG; import static io.trino.plugin.hive.metastore.glue.GlueConverter.PUBLIC_OWNER; +import static io.trino.plugin.hive.metastore.glue.GlueConverter.getTableTypeNullable; import static io.trino.plugin.hive.util.HiveUtil.DELTA_LAKE_PROVIDER; import static io.trino.plugin.hive.util.HiveUtil.ICEBERG_TABLE_TYPE_NAME; import static io.trino.plugin.hive.util.HiveUtil.ICEBERG_TABLE_TYPE_VALUE; @@ -246,7 +247,7 @@ void testConvertTable() io.trino.metastore.Table trinoTable = GlueConverter.fromGlueTable(glueTable, glueDatabase.name()); assertThat(trinoTable.getTableName()).isEqualTo(glueTable.name()); assertThat(trinoTable.getDatabaseName()).isEqualTo(glueDatabase.name()); - assertThat(trinoTable.getTableType()).isEqualTo(glueTable.tableType()); + assertThat(trinoTable.getTableType()).isEqualTo(getTableTypeNullable(glueTable)); assertThat(trinoTable.getOwner().orElse(null)).isEqualTo(glueTable.owner()); assertThat(trinoTable.getParameters()).isEqualTo(glueTable.parameters()); assertColumnList(glueTable.storageDescriptor().columns(), trinoTable.getDataColumns()); @@ -278,7 +279,7 @@ void testConvertTableWithOpenCSVSerDe() assertThat(trinoTable.getTableName()).isEqualTo(glueTable.name()); 
assertThat(trinoTable.getDatabaseName()).isEqualTo(glueDatabase.name()); - assertThat(trinoTable.getTableType()).isEqualTo(glueTable.tableType()); + assertThat(trinoTable.getTableType()).isEqualTo(getTableTypeNullable(glueTable)); assertThat(trinoTable.getOwner().orElse(null)).isEqualTo(glueTable.owner()); assertThat(trinoTable.getParameters()).isEqualTo(glueTable.parameters()); assertThat(trinoTable.getDataColumns()).hasSize(1); diff --git a/plugin/trino-http-event-listener/pom.xml b/plugin/trino-http-event-listener/pom.xml index accb64361675..88ec2a52eddd 100644 --- a/plugin/trino-http-event-listener/pom.xml +++ b/plugin/trino-http-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-http-server-event-listener/pom.xml b/plugin/trino-http-server-event-listener/pom.xml index 9905461170a8..c51d303b4833 100644 --- a/plugin/trino-http-server-event-listener/pom.xml +++ b/plugin/trino-http-server-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-hudi/pom.xml b/plugin/trino-hudi/pom.xml index 1a39863c44ac..ae435bff6742 100644 --- a/plugin/trino-hudi/pom.xml +++ b/plugin/trino-hudi/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-iceberg/README.md b/plugin/trino-iceberg/README.md new file mode 100644 index 000000000000..0a4d5ad7d93f --- /dev/null +++ b/plugin/trino-iceberg/README.md @@ -0,0 +1,30 @@ +# Iceberg Connector Developer Notes + +Steps to create TPCH tables on S3 Tables: +1. Set `AWS_REGION`, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. +2. 
Replace placeholders in the following command and run it: +```sh +./spark-sql \ +--packages org.apache.iceberg:iceberg-spark-runtime-3.5_2.12:1.6.1,software.amazon.awssdk:bundle:2.20.10,software.amazon.s3tables:s3-tables-catalog-for-iceberg-runtime:0.1.3,org.apache.kyuubi:kyuubi-spark-connector-tpch_2.12:1.8.0 \ +--conf spark.sql.catalog.s3tablesbucket=org.apache.iceberg.spark.SparkCatalog \ +--conf spark.sql.catalog.s3tablesbucket.catalog-impl=software.amazon.s3tables.iceberg.S3TablesCatalog \ +--conf spark.sql.catalog.s3tablesbucket.warehouse=arn:aws:s3tables:{region}:{account-id}:bucket/{bucket-name} \ +--conf spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions \ +--conf spark.sql.catalog.tpch=org.apache.kyuubi.spark.connector.tpch.TPCHCatalog +``` + +3. Run the following command to create TPCH tables: +```sql +CREATE TABLE s3tablesbucket.tpch.nation AS SELECT + n_nationkey AS nationkey, + n_name AS name, + n_regionkey AS regionkey, + n_comment AS comment +FROM tpch.tiny.nation; + +CREATE TABLE s3tablesbucket.tpch.region AS SELECT + r_regionkey AS regionkey, + r_name AS name, + r_comment AS comment +FROM tpch.tiny.region; +``` diff --git a/plugin/trino-iceberg/pom.xml b/plugin/trino-iceberg/pom.xml index ce70b35219c8..7207a8af07b1 100644 --- a/plugin/trino-iceberg/pom.xml +++ b/plugin/trino-iceberg/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -15,7 +15,7 @@ - 0.102.2 + 0.102.5 @@ -130,6 +130,11 @@ trino-filesystem-manager + + io.trino + trino-filesystem-s3 + + io.trino trino-hive @@ -218,6 +223,11 @@ iceberg-api + + org.apache.iceberg + iceberg-aws + + org.apache.iceberg iceberg-core @@ -359,12 +369,6 @@ runtime - - io.trino - trino-filesystem-s3 - runtime - - jakarta.servlet jakarta.servlet-api @@ -744,6 +748,7 @@ **/TestIcebergGlueTableOperationsInsertFailure.java **/TestIcebergGlueCatalogSkipArchive.java **/TestIcebergS3AndGlueMetastoreTest.java + 
**/TestIcebergS3TablesConnectorSmokeTest.java **/TestIcebergGcsConnectorSmokeTest.java **/TestIcebergAbfsConnectorSmokeTest.java **/Test*FailureRecoveryTest.java @@ -809,6 +814,7 @@ **/TestIcebergGlueTableOperationsInsertFailure.java **/TestIcebergGlueCatalogSkipArchive.java **/TestIcebergS3AndGlueMetastoreTest.java + **/TestIcebergS3TablesConnectorSmokeTest.java **/TestIcebergGcsConnectorSmokeTest.java **/TestIcebergAbfsConnectorSmokeTest.java **/TestIcebergSnowflakeCatalogConnectorSmokeTest.java diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java index 9c0560800826..f900bfb799fb 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/EntriesTable.java @@ -61,6 +61,8 @@ import static io.trino.spi.type.VarbinaryType.VARBINARY; import static io.trino.spi.type.VarcharType.VARCHAR; import static java.util.Objects.requireNonNull; +import static org.apache.iceberg.MetadataColumns.DELETE_FILE_PATH; +import static org.apache.iceberg.MetadataColumns.DELETE_FILE_POS; import static org.apache.iceberg.MetadataTableType.ALL_ENTRIES; import static org.apache.iceberg.MetadataTableType.ENTRIES; @@ -202,13 +204,26 @@ private void appendDataFile(RowBlockBuilder blockBuilder, StructProjection dataF Map nanValueCounts = dataFile.get(++position, Map.class); appendIntegerBigintMap((MapBlockBuilder) fieldBuilders.get(position), nanValueCounts); - //noinspection unchecked - Map lowerBounds = dataFile.get(++position, Map.class); - appendIntegerVarcharMap((MapBlockBuilder) fieldBuilders.get(position), lowerBounds); + switch (ContentType.of(content)) { + case DATA, EQUALITY_DELETE -> { + //noinspection unchecked + Map lowerBounds = dataFile.get(++position, Map.class); + appendIntegerVarcharMap((MapBlockBuilder) fieldBuilders.get(position), lowerBounds); - //noinspection 
unchecked - Map upperBounds = dataFile.get(++position, Map.class); - appendIntegerVarcharMap((MapBlockBuilder) fieldBuilders.get(position), upperBounds); + //noinspection unchecked + Map upperBounds = dataFile.get(++position, Map.class); + appendIntegerVarcharMap((MapBlockBuilder) fieldBuilders.get(position), upperBounds); + } + case POSITION_DELETE -> { + //noinspection unchecked + Map lowerBounds = dataFile.get(++position, Map.class); + appendBoundsForPositionDelete((MapBlockBuilder) fieldBuilders.get(position), lowerBounds); + + //noinspection unchecked + Map upperBounds = dataFile.get(++position, Map.class); + appendBoundsForPositionDelete((MapBlockBuilder) fieldBuilders.get(position), upperBounds); + } + } ByteBuffer keyMetadata = dataFile.get(++position, ByteBuffer.class); if (keyMetadata == null) { @@ -222,12 +237,30 @@ private void appendDataFile(RowBlockBuilder blockBuilder, StructProjection dataF List splitOffsets = dataFile.get(++position, List.class); appendBigintArray((ArrayBlockBuilder) fieldBuilders.get(position), splitOffsets); - //noinspection unchecked - List equalityIds = dataFile.get(++position, List.class); - appendBigintArray((ArrayBlockBuilder) fieldBuilders.get(position), equalityIds); + switch (ContentType.of(content)) { + case DATA -> { + // data files don't have equality ids + fieldBuilders.get(++position).appendNull(); - Integer sortOrderId = dataFile.get(++position, Integer.class); - INTEGER.writeLong(fieldBuilders.get(position), Long.valueOf(sortOrderId)); + Integer sortOrderId = dataFile.get(++position, Integer.class); + INTEGER.writeLong(fieldBuilders.get(position), Long.valueOf(sortOrderId)); + } + case POSITION_DELETE -> { + // position delete files don't have equality ids + fieldBuilders.get(++position).appendNull(); + + // position delete files don't have sort order id + fieldBuilders.get(++position).appendNull(); + } + case EQUALITY_DELETE -> { + //noinspection unchecked + List equalityIds = dataFile.get(++position, List.class); 
+ appendIntegerArray((ArrayBlockBuilder) fieldBuilders.get(position), equalityIds); + + Integer sortOrderId = dataFile.get(++position, Integer.class); + INTEGER.writeLong(fieldBuilders.get(position), Long.valueOf(sortOrderId)); + } + } }); } @@ -244,6 +277,19 @@ public static void appendBigintArray(ArrayBlockBuilder blockBuilder, @Nullable L }); } + public static void appendIntegerArray(ArrayBlockBuilder blockBuilder, @Nullable List values) + { + if (values == null) { + blockBuilder.appendNull(); + return; + } + blockBuilder.buildEntry(elementBuilder -> { + for (Integer value : values) { + INTEGER.writeLong(elementBuilder, value); + } + }); + } + private static void appendIntegerBigintMap(MapBlockBuilder blockBuilder, @Nullable Map values) { if (values == null) { @@ -268,4 +314,43 @@ private void appendIntegerVarcharMap(MapBlockBuilder blockBuilder, @Nullable Map VARCHAR.writeString(valueBuilder, Transforms.identity().toHumanString(type, Conversions.fromByteBuffer(type, value))); })); } + + private static void appendBoundsForPositionDelete(MapBlockBuilder blockBuilder, @Nullable Map values) + { + if (values == null) { + blockBuilder.appendNull(); + return; + } + + blockBuilder.buildEntry((keyBuilder, valueBuilder) -> { + INTEGER.writeLong(keyBuilder, DELETE_FILE_POS.fieldId()); + ByteBuffer pos = values.get(DELETE_FILE_POS.fieldId()); + checkArgument(pos != null, "delete file pos is null"); + VARCHAR.writeString(valueBuilder, Transforms.identity().toHumanString(Types.LongType.get(), Conversions.fromByteBuffer(Types.LongType.get(), pos))); + + INTEGER.writeLong(keyBuilder, DELETE_FILE_PATH.fieldId()); + ByteBuffer path = values.get(DELETE_FILE_PATH.fieldId()); + checkArgument(path != null, "delete file path is null"); + VARCHAR.writeString(valueBuilder, Transforms.identity().toHumanString(Types.StringType.get(), Conversions.fromByteBuffer(Types.StringType.get(), path))); + }); + } + + private enum ContentType + { + DATA, + POSITION_DELETE, + EQUALITY_DELETE; + + 
static ContentType of(int content) + { + checkArgument(content >= 0 && content <= 2, "Unexpected content type: %s", content); + if (content == 0) { + return DATA; + } + if (content == 1) { + return POSITION_DELETE; + } + return EQUALITY_DELETE; + } + } } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConfig.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConfig.java index 51040a281419..389d64ee3519 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConfig.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConfig.java @@ -21,6 +21,7 @@ import io.airlift.configuration.LegacyConfig; import io.airlift.units.DataSize; import io.airlift.units.Duration; +import io.airlift.units.ThreadCount; import io.trino.plugin.hive.HiveCompressionCodec; import jakarta.validation.constraints.AssertFalse; import jakarta.validation.constraints.DecimalMax; @@ -488,9 +489,9 @@ public int getSplitManagerThreads() @Config("iceberg.split-manager-threads") @ConfigDescription("Number of threads to use for generating splits") - public IcebergConfig setSplitManagerThreads(int splitManagerThreads) + public IcebergConfig setSplitManagerThreads(String splitManagerThreads) { - this.splitManagerThreads = splitManagerThreads; + this.splitManagerThreads = ThreadCount.valueOf(splitManagerThreads).getThreadCount(); return this; } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java index f94341c3b584..316b793c24b6 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadata.java @@ -52,6 +52,7 @@ import io.trino.plugin.iceberg.procedure.IcebergDropExtendedStatsHandle; import io.trino.plugin.iceberg.procedure.IcebergExpireSnapshotsHandle; import 
io.trino.plugin.iceberg.procedure.IcebergOptimizeHandle; +import io.trino.plugin.iceberg.procedure.IcebergOptimizeManifestsHandle; import io.trino.plugin.iceberg.procedure.IcebergRemoveOrphanFilesHandle; import io.trino.plugin.iceberg.procedure.IcebergRollbackToSnapshotHandle; import io.trino.plugin.iceberg.procedure.IcebergTableExecuteHandle; @@ -148,6 +149,7 @@ import org.apache.iceberg.PartitionSpecParser; import org.apache.iceberg.ReplaceSortOrder; import org.apache.iceberg.RewriteFiles; +import org.apache.iceberg.RewriteManifests; import org.apache.iceberg.RowDelta; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; @@ -330,6 +332,7 @@ import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.DROP_EXTENDED_STATS; import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.EXPIRE_SNAPSHOTS; import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.OPTIMIZE; +import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.OPTIMIZE_MANIFESTS; import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.REMOVE_ORPHAN_FILES; import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.ROLLBACK_TO_SNAPSHOT; import static io.trino.plugin.iceberg.procedure.MigrationUtils.addFiles; @@ -380,6 +383,8 @@ import static org.apache.iceberg.TableProperties.DELETE_ISOLATION_LEVEL; import static org.apache.iceberg.TableProperties.DELETE_ISOLATION_LEVEL_DEFAULT; import static org.apache.iceberg.TableProperties.FORMAT_VERSION; +import static org.apache.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES; +import static org.apache.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES_DEFAULT; import static org.apache.iceberg.TableProperties.OBJECT_STORE_ENABLED; import static org.apache.iceberg.TableProperties.ORC_BLOOM_FILTER_COLUMNS; import static org.apache.iceberg.TableProperties.PARQUET_BLOOM_FILTER_COLUMN_ENABLED_PREFIX; @@ -522,7 +527,7 @@ public ConnectorTableHandle getTableHandle( 
BaseTable table; try { - table = (BaseTable) catalog.loadTable(session, new SchemaTableName(tableName.getSchemaName(), tableName.getTableName())); + table = catalog.loadTable(session, new SchemaTableName(tableName.getSchemaName(), tableName.getTableName())); } catch (TableNotFoundException e) { return null; @@ -878,7 +883,7 @@ public ConnectorTableMetadata getTableMetadata(ConnectorSession session, Connect IcebergTableHandle tableHandle = checkValidTableHandle(table); // This method does not calculate column metadata for the projected columns checkArgument(tableHandle.getProjectedColumns().isEmpty(), "Unexpected projected columns"); - Table icebergTable = catalog.loadTable(session, tableHandle.getSchemaTableName()); + BaseTable icebergTable = catalog.loadTable(session, tableHandle.getSchemaTableName()); List columns = getColumnMetadatas(SchemaParser.fromJson(tableHandle.getTableSchemaJson()), typeManager); return new ConnectorTableMetadata(tableHandle.getSchemaTableName(), columns, getIcebergTableProperties(icebergTable), getTableComment(icebergTable)); } @@ -1592,6 +1597,7 @@ public Optional getTableHandleForExecute( return switch (procedureId) { case OPTIMIZE -> getTableHandleForOptimize(tableHandle, icebergTable, executeProperties, retryMode); + case OPTIMIZE_MANIFESTS -> getTableHandleForOptimizeManifests(session, tableHandle); case DROP_EXTENDED_STATS -> getTableHandleForDropExtendedStats(session, tableHandle); case ROLLBACK_TO_SNAPSHOT -> getTableHandleForRollbackToSnapshot(session, tableHandle, executeProperties); case EXPIRE_SNAPSHOTS -> getTableHandleForExpireSnapshots(session, tableHandle, executeProperties); @@ -1628,6 +1634,18 @@ private Optional getTableHandleForOptimize( icebergTable.io().properties())); } + private Optional getTableHandleForOptimizeManifests(ConnectorSession session, IcebergTableHandle tableHandle) + { + Table icebergTable = catalog.loadTable(session, tableHandle.getSchemaTableName()); + + return Optional.of(new 
IcebergTableExecuteHandle( + tableHandle.getSchemaTableName(), + OPTIMIZE_MANIFESTS, + new IcebergOptimizeManifestsHandle(), + icebergTable.location(), + icebergTable.io().properties())); + } + private Optional getTableHandleForDropExtendedStats(ConnectorSession session, IcebergTableHandle tableHandle) { Table icebergTable = catalog.loadTable(session, tableHandle.getSchemaTableName()); @@ -1791,6 +1809,7 @@ public Optional getLayoutForTableExecute(ConnectorSession switch (executeHandle.procedureId()) { case OPTIMIZE: return getLayoutForOptimize(session, executeHandle); + case OPTIMIZE_MANIFESTS: case DROP_EXTENDED_STATS: case ROLLBACK_TO_SNAPSHOT: case EXPIRE_SNAPSHOTS: @@ -1821,6 +1840,7 @@ public BeginTableExecuteResult OPTIMIZE_MAX_SUPPORTED_TABLE_VERSION) { throw new TrinoException(NOT_SUPPORTED, format( "%s is not supported for Iceberg table format version > %d. Table %s format version is %s.", @@ -1867,6 +1887,7 @@ public void finishTableExecute(ConnectorSession session, ConnectorTableExecuteHa case OPTIMIZE: finishOptimize(session, executeHandle, fragments, splitSourceInfo); return; + case OPTIMIZE_MANIFESTS: case DROP_EXTENDED_STATS: case ROLLBACK_TO_SNAPSHOT: case EXPIRE_SNAPSHOTS: @@ -2005,6 +2026,9 @@ public void executeTableExecute(ConnectorSession session, ConnectorTableExecuteH { IcebergTableExecuteHandle executeHandle = (IcebergTableExecuteHandle) tableExecuteHandle; switch (executeHandle.procedureId()) { + case OPTIMIZE_MANIFESTS: + executeOptimizeManifests(session, executeHandle); + return; case DROP_EXTENDED_STATS: executeDropExtendedStats(session, executeHandle); return; @@ -2028,6 +2052,26 @@ public void executeTableExecute(ConnectorSession session, ConnectorTableExecuteH } } + private void executeOptimizeManifests(ConnectorSession session, IcebergTableExecuteHandle executeHandle) + { + checkArgument(executeHandle.procedureHandle() instanceof IcebergOptimizeManifestsHandle, "Unexpected procedure handle %s", executeHandle.procedureHandle()); + + 
BaseTable icebergTable = catalog.loadTable(session, executeHandle.schemaTableName()); + List manifests = icebergTable.currentSnapshot().allManifests(icebergTable.io()); + if (manifests.isEmpty()) { + return; + } + if (manifests.size() == 1 && manifests.getFirst().length() < icebergTable.operations().current().propertyAsLong(MANIFEST_TARGET_SIZE_BYTES, MANIFEST_TARGET_SIZE_BYTES_DEFAULT)) { + return; + } + + beginTransaction(icebergTable); + RewriteManifests rewriteManifests = transaction.rewriteManifests(); + rewriteManifests.clusterBy(_ -> "file").commit(); + commitTransaction(transaction, "optimize manifests"); + transaction = null; + } + private void executeDropExtendedStats(ConnectorSession session, IcebergTableExecuteHandle executeHandle) { checkArgument(executeHandle.procedureHandle() instanceof IcebergDropExtendedStatsHandle, "Unexpected procedure handle %s", executeHandle.procedureHandle()); @@ -2056,7 +2100,7 @@ private void executeExpireSnapshots(ConnectorSession session, IcebergTableExecut { IcebergExpireSnapshotsHandle expireSnapshotsHandle = (IcebergExpireSnapshotsHandle) executeHandle.procedureHandle(); - Table table = catalog.loadTable(session, executeHandle.schemaTableName()); + BaseTable table = catalog.loadTable(session, executeHandle.schemaTableName()); Duration retention = requireNonNull(expireSnapshotsHandle.retentionThreshold(), "retention is null"); validateTableExecuteParameters( table, @@ -2098,7 +2142,7 @@ private void executeExpireSnapshots(ConnectorSession session, IcebergTableExecut } private static void validateTableExecuteParameters( - Table table, + BaseTable table, SchemaTableName schemaTableName, String procedureName, Duration retentionThreshold, @@ -2106,7 +2150,7 @@ private static void validateTableExecuteParameters( String minRetentionParameterName, String sessionMinRetentionParameterName) { - int tableFormatVersion = ((BaseTable) table).operations().current().formatVersion(); + int tableFormatVersion = 
table.operations().current().formatVersion(); if (tableFormatVersion > CLEANING_UP_PROCEDURES_MAX_SUPPORTED_TABLE_VERSION) { // It is not known if future version won't bring any new kind of metadata or data files // because of the way procedures are implemented it is safer to fail here than to potentially remove @@ -2138,7 +2182,7 @@ public void executeRemoveOrphanFiles(ConnectorSession session, IcebergTableExecu { IcebergRemoveOrphanFilesHandle removeOrphanFilesHandle = (IcebergRemoveOrphanFilesHandle) executeHandle.procedureHandle(); - Table table = catalog.loadTable(session, executeHandle.schemaTableName()); + BaseTable table = catalog.loadTable(session, executeHandle.schemaTableName()); Duration retention = requireNonNull(removeOrphanFilesHandle.retentionThreshold(), "retention is null"); validateTableExecuteParameters( table, diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergModule.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergModule.java index e2845bf4195a..ea88078bf1d8 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergModule.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergModule.java @@ -46,6 +46,7 @@ import io.trino.plugin.iceberg.procedure.AddFilesTableProcedure; import io.trino.plugin.iceberg.procedure.DropExtendedStatsTableProcedure; import io.trino.plugin.iceberg.procedure.ExpireSnapshotsTableProcedure; +import io.trino.plugin.iceberg.procedure.OptimizeManifestsTableProcedure; import io.trino.plugin.iceberg.procedure.OptimizeTableProcedure; import io.trino.plugin.iceberg.procedure.RegisterTableProcedure; import io.trino.plugin.iceberg.procedure.RemoveOrphanFilesTableProcedure; @@ -130,6 +131,7 @@ public void configure(Binder binder) Multibinder tableProcedures = newSetBinder(binder, TableProcedureMetadata.class); tableProcedures.addBinding().toProvider(OptimizeTableProcedure.class).in(Scopes.SINGLETON); + 
tableProcedures.addBinding().toProvider(OptimizeManifestsTableProcedure.class).in(Scopes.SINGLETON); tableProcedures.addBinding().toProvider(DropExtendedStatsTableProcedure.class).in(Scopes.SINGLETON); tableProcedures.addBinding().toProvider(RollbackToSnapshotTableProcedure.class).in(Scopes.SINGLETON); tableProcedures.addBinding().toProvider(ExpireSnapshotsTableProcedure.class).in(Scopes.SINGLETON); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergPageSinkProvider.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergPageSinkProvider.java index b2157715e4a2..bc4d694483f7 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergPageSinkProvider.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergPageSinkProvider.java @@ -146,6 +146,7 @@ public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHa sortingFileWriterMaxOpenFiles, typeManager, pageSorter); + case OPTIMIZE_MANIFESTS: case DROP_EXTENDED_STATS: case ROLLBACK_TO_SNAPSHOT: case EXPIRE_SNAPSHOTS: diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableProperties.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableProperties.java index c54294e16be0..a125370b3f0d 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableProperties.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTableProperties.java @@ -253,7 +253,7 @@ private static void validateFormatVersion(int version) } } - public static int getMaxComiRetry(Map tableProperties) + public static int getMaxCommitRetry(Map tableProperties) { return (int) tableProperties.getOrDefault(MAX_COMMIT_RETRY, COMMIT_NUM_RETRIES_DEFAULT); } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergUtil.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergUtil.java index ae9954aa6fd9..6b0b0ec51387 100644 --- 
a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergUtil.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergUtil.java @@ -217,7 +217,7 @@ public final class IcebergUtil private IcebergUtil() {} - public static Table loadIcebergTable(TrinoCatalog catalog, IcebergTableOperationsProvider tableOperationsProvider, ConnectorSession session, SchemaTableName table) + public static BaseTable loadIcebergTable(TrinoCatalog catalog, IcebergTableOperationsProvider tableOperationsProvider, ConnectorSession session, SchemaTableName table) { TableOperations operations = tableOperationsProvider.createTableOperations( catalog, @@ -229,7 +229,7 @@ public static Table loadIcebergTable(TrinoCatalog catalog, IcebergTableOperation return new BaseTable(operations, quotedTableName(table), TRINO_METRICS_REPORTER); } - public static Table getIcebergTableWithMetadata( + public static BaseTable getIcebergTableWithMetadata( TrinoCatalog catalog, IcebergTableOperationsProvider tableOperationsProvider, ConnectorSession session, @@ -299,7 +299,7 @@ public static List buildPath(Map indexParents, int fi return ImmutableList.copyOf(path.reversed()); } - public static Map getIcebergTableProperties(Table icebergTable) + public static Map getIcebergTableProperties(BaseTable icebergTable) { ImmutableMap.Builder properties = ImmutableMap.builder(); properties.put(FILE_FORMAT_PROPERTY, getFileFormat(icebergTable)); @@ -318,7 +318,7 @@ public static Map getIcebergTableProperties(Table icebergTable) properties.put(LOCATION_PROPERTY, icebergTable.location()); } - int formatVersion = ((BaseTable) icebergTable).operations().current().formatVersion(); + int formatVersion = icebergTable.operations().current().formatVersion(); properties.put(FORMAT_VERSION_PROPERTY, formatVersion); if (icebergTable.properties().containsKey(COMMIT_NUM_RETRIES)) { @@ -862,7 +862,7 @@ public static Map createTableProperties(ConnectorTableMetadata t IcebergFileFormat fileFormat = 
IcebergTableProperties.getFileFormat(tableMetadata.getProperties()); propertiesBuilder.put(DEFAULT_FILE_FORMAT, fileFormat.toIceberg().toString()); propertiesBuilder.put(FORMAT_VERSION, Integer.toString(IcebergTableProperties.getFormatVersion(tableMetadata.getProperties()))); - propertiesBuilder.put(COMMIT_NUM_RETRIES, Integer.toString(IcebergTableProperties.getMaxComiRetry(tableMetadata.getProperties()))); + propertiesBuilder.put(COMMIT_NUM_RETRIES, Integer.toString(IcebergTableProperties.getMaxCommitRetry(tableMetadata.getProperties()))); boolean objectStoreLayoutEnabled = IcebergTableProperties.getObjectStoreLayoutEnabled(tableMetadata.getProperties()); if (objectStoreLayoutEnabled) { diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractTrinoCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractTrinoCatalog.java index 55406ef7c86d..83c8cca7e537 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractTrinoCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractTrinoCatalog.java @@ -201,7 +201,7 @@ public Map getMaterializedViewProperties(ConnectorSession sessio .getSchemaTableName(); try { - Table storageTable = loadTable(session, definition.getStorageTable().orElseThrow().getSchemaTableName()); + BaseTable storageTable = loadTable(session, definition.getStorageTable().orElseThrow().getSchemaTableName()); return ImmutableMap.builder() .putAll(getIcebergTableProperties(storageTable)) .put(STORAGE_SCHEMA, storageTableName.getSchemaName()) @@ -246,7 +246,7 @@ protected Transaction newCreateOrReplaceTableTransaction( BaseTable table; Optional metadata = Optional.empty(); try { - table = (BaseTable) loadTable(session, new SchemaTableName(schemaTableName.getSchemaName(), schemaTableName.getTableName())); + table = loadTable(session, new SchemaTableName(schemaTableName.getSchemaName(), schemaTableName.getTableName())); metadata = 
Optional.of(table.operations().current()); } catch (TableNotFoundException _) { diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/TrinoCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/TrinoCatalog.java index c7370d65d810..ffab19f2c1b8 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/TrinoCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/TrinoCatalog.java @@ -146,7 +146,7 @@ Transaction newCreateOrReplaceTableTransaction( * @return Iceberg table loaded * @throws UnknownTableTypeException if table is not of Iceberg type in the metastore */ - Table loadTable(ConnectorSession session, SchemaTableName schemaTableName); + BaseTable loadTable(ConnectorSession session, SchemaTableName schemaTableName); /** * Bulk load column metadata. The returned map may contain fewer entries then asked for. diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/TrinoGlueCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/TrinoGlueCatalog.java index 12782f9957ea..d43e7130d377 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/TrinoGlueCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/TrinoGlueCatalog.java @@ -82,7 +82,6 @@ import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortOrder; -import org.apache.iceberg.Table; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.TableMetadataParser; import org.apache.iceberg.TableOperations; @@ -598,7 +597,7 @@ private void getCommentsFromIcebergMetadata( } @Override - public Table loadTable(ConnectorSession session, SchemaTableName table) + public BaseTable loadTable(ConnectorSession session, SchemaTableName table) { if (viewCache.asMap().containsKey(table) || materializedViewCache.asMap().containsKey(table)) { throw 
new TableNotFoundException(table); @@ -711,7 +710,7 @@ private Optional> getCachedColumnMetadata(com.amazonaws.ser @Override public void dropTable(ConnectorSession session, SchemaTableName schemaTableName) { - BaseTable table = (BaseTable) loadTable(session, schemaTableName); + BaseTable table = loadTable(session, schemaTableName); try { deleteTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/TrinoHiveCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/TrinoHiveCatalog.java index 665a050f69b3..9e85eb1011b4 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/TrinoHiveCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/TrinoHiveCatalog.java @@ -59,7 +59,6 @@ import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortOrder; -import org.apache.iceberg.Table; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.TableMetadataParser; import org.apache.iceberg.Transaction; @@ -421,7 +420,7 @@ public Optional> streamRelationComments( @Override public void dropTable(ConnectorSession session, SchemaTableName schemaTableName) { - BaseTable table = (BaseTable) loadTable(session, schemaTableName); + BaseTable table = loadTable(session, schemaTableName); TableMetadata metadata = table.operations().current(); io.trino.metastore.Table metastoreTable = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()) @@ -475,14 +474,14 @@ public void renameTable(ConnectorSession session, SchemaTableName from, SchemaTa } @Override - public Table loadTable(ConnectorSession session, SchemaTableName schemaTableName) + public BaseTable loadTable(ConnectorSession session, SchemaTableName schemaTableName) { TableMetadata metadata; try { metadata = uncheckedCacheGet( tableMetadataCache, schemaTableName, - 
() -> ((BaseTable) loadIcebergTable(this, tableOperationsProvider, session, schemaTableName)).operations().current()); + () -> loadIcebergTable(this, tableOperationsProvider, session, schemaTableName).operations().current()); } catch (UncheckedExecutionException e) { throwIfUnchecked(e.getCause()); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/jdbc/TrinoJdbcCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/jdbc/TrinoJdbcCatalog.java index 0e5c401c8ba5..97a6b0183439 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/jdbc/TrinoJdbcCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/jdbc/TrinoJdbcCatalog.java @@ -42,7 +42,6 @@ import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortOrder; -import org.apache.iceberg.Table; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.Transaction; import org.apache.iceberg.catalog.Namespace; @@ -338,7 +337,7 @@ public void unregisterTable(ConnectorSession session, SchemaTableName tableName) @Override public void dropTable(ConnectorSession session, SchemaTableName schemaTableName) { - BaseTable table = (BaseTable) loadTable(session, schemaTableName); + BaseTable table = loadTable(session, schemaTableName); jdbcCatalog.dropTable(toIdentifier(schemaTableName), false); try { @@ -381,14 +380,14 @@ public void renameTable(ConnectorSession session, SchemaTableName from, SchemaTa } @Override - public Table loadTable(ConnectorSession session, SchemaTableName schemaTableName) + public BaseTable loadTable(ConnectorSession session, SchemaTableName schemaTableName) { TableMetadata metadata; try { metadata = uncheckedCacheGet( tableMetadataCache, schemaTableName, - () -> ((BaseTable) loadIcebergTable(this, tableOperationsProvider, session, schemaTableName)).operations().current()); + () -> loadIcebergTable(this, tableOperationsProvider, session, 
schemaTableName).operations().current()); } catch (UncheckedExecutionException e) { throwIfUnchecked(e.getCause()); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/nessie/TrinoNessieCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/nessie/TrinoNessieCatalog.java index ed7be861fd9f..dad39b8d28fa 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/nessie/TrinoNessieCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/nessie/TrinoNessieCatalog.java @@ -40,7 +40,6 @@ import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SortOrder; -import org.apache.iceberg.Table; import org.apache.iceberg.TableMetadata; import org.apache.iceberg.TableOperations; import org.apache.iceberg.Transaction; @@ -199,7 +198,7 @@ public Optional> streamRelationComments( } @Override - public Table loadTable(ConnectorSession session, SchemaTableName table) + public BaseTable loadTable(ConnectorSession session, SchemaTableName table) { TableMetadata metadata; try { diff --git a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryHistory.tsx b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/AwsProperties.java similarity index 63% rename from core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryHistory.tsx rename to plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/AwsProperties.java index 18371faf01af..c28e7e35e5b0 100644 --- a/core/trino-web-ui/src/main/resources/webapp-preview/src/components/QueryHistory.tsx +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/AwsProperties.java @@ -11,15 +11,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -import { Box, Typography } from '@mui/material' +package io.trino.plugin.iceberg.catalog.rest; -export const QueryHistory = () => { - return ( - <> - - Query History - - Placeholder for Query History - - ) +import java.util.Map; + +@FunctionalInterface +public interface AwsProperties +{ + Map get(); } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogConfig.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogConfig.java index 0390d9ab38f4..89cc158f7744 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogConfig.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogConfig.java @@ -47,6 +47,8 @@ public enum SessionType private Security security = Security.NONE; private SessionType sessionType = SessionType.NONE; private boolean vendedCredentialsEnabled; + private boolean viewEndpointsEnabled = true; + private boolean sigV4Enabled; private boolean caseInsensitiveNameMatching; private Duration caseInsensitiveNameMatchingCacheTtl = new Duration(1, MINUTES); @@ -146,6 +148,32 @@ public IcebergRestCatalogConfig setVendedCredentialsEnabled(boolean vendedCreden return this; } + public boolean isViewEndpointsEnabled() + { + return viewEndpointsEnabled; + } + + @Config("iceberg.rest-catalog.view-endpoints-enabled") + @ConfigDescription("Enable view endpoints") + public IcebergRestCatalogConfig setViewEndpointsEnabled(boolean viewEndpointsEnabled) + { + this.viewEndpointsEnabled = viewEndpointsEnabled; + return this; + } + + public boolean isSigV4Enabled() + { + return sigV4Enabled; + } + + @Config("iceberg.rest-catalog.sigv4-enabled") + @ConfigDescription("Enable AWS Signature version 4 (SigV4)") + public IcebergRestCatalogConfig setSigV4Enabled(boolean sigV4Enabled) + { + this.sigV4Enabled = sigV4Enabled; + return this; + } + public boolean isCaseInsensitiveNameMatching() { 
return caseInsensitiveNameMatching; diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogModule.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogModule.java index 4753b5c8cb12..9efc4465c567 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogModule.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogModule.java @@ -13,6 +13,7 @@ */ package io.trino.plugin.iceberg.catalog.rest; +import com.google.common.collect.ImmutableMap; import com.google.inject.Binder; import com.google.inject.Scopes; import io.airlift.configuration.AbstractConfigurationAwareModule; @@ -39,6 +40,14 @@ protected void setup(Binder binder) config -> config.getSecurity() == Security.OAUTH2, new OAuth2SecurityModule(), new NoneSecurityModule())); + install(conditionalModule( + IcebergRestCatalogConfig.class, + IcebergRestCatalogConfig::isSigV4Enabled, + internalBinder -> { + configBinder(internalBinder).bindConfig(IcebergRestCatalogSigV4Config.class); + internalBinder.bind(AwsProperties.class).to(SigV4AwsProperties.class).in(Scopes.SINGLETON); + }, + internalBinder -> internalBinder.bind(AwsProperties.class).toInstance(ImmutableMap::of))); binder.bind(TrinoCatalogFactory.class).to(TrinoIcebergRestCatalogFactory.class).in(Scopes.SINGLETON); newOptionalBinder(binder, IcebergFileSystemFactory.class).setBinding().to(IcebergRestCatalogFileSystemFactory.class).in(Scopes.SINGLETON); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogSigV4Config.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogSigV4Config.java new file mode 100644 index 000000000000..e7c80e628762 --- /dev/null +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/IcebergRestCatalogSigV4Config.java @@ -0,0 +1,38 @@ +/* + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.iceberg.catalog.rest; + +import io.airlift.configuration.Config; +import io.airlift.configuration.ConfigDescription; +import jakarta.validation.constraints.NotNull; +import org.apache.iceberg.aws.AwsProperties; + +public class IcebergRestCatalogSigV4Config +{ + private String signingName = AwsProperties.REST_SIGNING_NAME_DEFAULT; + + @NotNull + public String getSigningName() + { + return signingName; + } + + @Config("iceberg.rest-catalog.signing-name") + @ConfigDescription("AWS SigV4 signing service name") + public IcebergRestCatalogSigV4Config setSigningName(String signingName) + { + this.signingName = signingName; + return this; + } +} diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/SigV4AwsProperties.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/SigV4AwsProperties.java new file mode 100644 index 000000000000..dcf93db4bdb6 --- /dev/null +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/SigV4AwsProperties.java @@ -0,0 +1,52 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.iceberg.catalog.rest; + +import com.google.common.collect.ImmutableMap; +import com.google.inject.Inject; +import io.trino.filesystem.s3.S3FileSystemConfig; +import io.trino.plugin.iceberg.IcebergSecurityConfig; + +import java.util.Map; + +import static com.google.common.base.Preconditions.checkArgument; +import static io.trino.plugin.iceberg.IcebergSecurityConfig.IcebergSecurity.READ_ONLY; +import static java.util.Objects.requireNonNull; + +public class SigV4AwsProperties + implements AwsProperties +{ + private final Map properties; + + @Inject + public SigV4AwsProperties(IcebergSecurityConfig securityConfig, IcebergRestCatalogSigV4Config sigV4Config, S3FileSystemConfig s3Config) + { + // TODO https://github.com/trinodb/trino/issues/24916 Allow write operations with SigV4 + checkArgument(securityConfig.getSecuritySystem() == READ_ONLY, "Read-only security system is required"); + this.properties = ImmutableMap.builder() + .put("rest.sigv4-enabled", "true") + .put("rest.signing-name", sigV4Config.getSigningName()) + .put("rest.access-key-id", requireNonNull(s3Config.getAwsAccessKey(), "s3.aws-access-key is null")) + .put("rest.secret-access-key", requireNonNull(s3Config.getAwsSecretKey(), "s3.aws-secret-key is null")) + .put("rest.signing-region", requireNonNull(s3Config.getRegion(), "s3.region is null")) + .put("rest-metrics-reporting-enabled", "false") + .buildOrThrow(); + } + + @Override + public Map get() + { + return properties; + } +} diff --git 
a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoIcebergRestCatalogFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoIcebergRestCatalogFactory.java index c3f47dffc2f0..a4cb154aa758 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoIcebergRestCatalogFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoIcebergRestCatalogFactory.java @@ -57,7 +57,9 @@ public class TrinoIcebergRestCatalogFactory private final boolean nestedNamespaceEnabled; private final SessionType sessionType; private final boolean vendedCredentialsEnabled; + private final boolean viewEndpointsEnabled; private final SecurityProperties securityProperties; + private final AwsProperties awsProperties; private final boolean uniqueTableLocation; private final TypeManager typeManager; private final boolean caseInsensitiveNameMatching; @@ -73,6 +75,7 @@ public TrinoIcebergRestCatalogFactory( CatalogName catalogName, IcebergRestCatalogConfig restConfig, SecurityProperties securityProperties, + AwsProperties awsProperties, IcebergConfig icebergConfig, TypeManager typeManager, NodeVersion nodeVersion) @@ -87,7 +90,9 @@ public TrinoIcebergRestCatalogFactory( this.nestedNamespaceEnabled = restConfig.isNestedNamespaceEnabled(); this.sessionType = restConfig.getSessionType(); this.vendedCredentialsEnabled = restConfig.isVendedCredentialsEnabled(); + this.viewEndpointsEnabled = restConfig.isViewEndpointsEnabled(); this.securityProperties = requireNonNull(securityProperties, "securityProperties is null"); + this.awsProperties = requireNonNull(awsProperties, "awsProperties is null"); requireNonNull(icebergConfig, "icebergConfig is null"); this.uniqueTableLocation = icebergConfig.isUniqueTableLocation(); this.typeManager = requireNonNull(typeManager, "typeManager is null"); @@ -112,9 +117,10 @@ public synchronized TrinoCatalog create(ConnectorIdentity identity) 
properties.put(CatalogProperties.URI, serverUri.toString()); warehouse.ifPresent(location -> properties.put(CatalogProperties.WAREHOUSE_LOCATION, location)); prefix.ifPresent(prefix -> properties.put("prefix", prefix)); - properties.put("view-endpoints-supported", "true"); + properties.put("view-endpoints-supported", Boolean.toString(viewEndpointsEnabled)); properties.put("trino-version", trinoVersion); properties.putAll(securityProperties.get()); + properties.putAll(awsProperties.get()); if (vendedCredentialsEnabled) { properties.put("header.X-Iceberg-Access-Delegation", "vended-credentials"); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoRestCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoRestCatalog.java index 1ab12b1ee824..275e23596bc6 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoRestCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/rest/TrinoRestCatalog.java @@ -119,7 +119,7 @@ public class TrinoRestCatalog private final Cache remoteNamespaceMappingCache; private final Cache remoteTableMappingCache; - private final Cache tableCache = EvictableCacheBuilder.newBuilder() + private final Cache tableCache = EvictableCacheBuilder.newBuilder() .maximumSize(PER_QUERY_CACHE_SIZE) .build(); @@ -489,7 +489,7 @@ public void renameTable(ConnectorSession session, SchemaTableName from, SchemaTa } @Override - public Table loadTable(ConnectorSession session, SchemaTableName schemaTableName) + public BaseTable loadTable(ConnectorSession session, SchemaTableName schemaTableName) { Namespace namespace = toNamespace(schemaTableName.getSchemaName()); try { diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/snowflake/TrinoSnowflakeCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/snowflake/TrinoSnowflakeCatalog.java index 2f58396e6c69..fbe3ef737121 100644 
--- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/snowflake/TrinoSnowflakeCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/snowflake/TrinoSnowflakeCatalog.java @@ -263,7 +263,7 @@ public void renameTable(ConnectorSession session, SchemaTableName from, SchemaTa } @Override - public Table loadTable(ConnectorSession session, SchemaTableName schemaTableName) + public BaseTable loadTable(ConnectorSession session, SchemaTableName schemaTableName) { TableMetadata metadata; try { diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergOptimizeManifestsHandle.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergOptimizeManifestsHandle.java new file mode 100644 index 000000000000..cc1a44174ec5 --- /dev/null +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergOptimizeManifestsHandle.java @@ -0,0 +1,17 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.iceberg.procedure; + +public record IcebergOptimizeManifestsHandle() + implements IcebergProcedureHandle {} diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergProcedureHandle.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergProcedureHandle.java index f09e6b8258d9..bd2c64e0778c 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergProcedureHandle.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergProcedureHandle.java @@ -24,6 +24,7 @@ @JsonSubTypes.Type(value = IcebergRollbackToSnapshotHandle.class, name = "rollback_to_snapshot"), @JsonSubTypes.Type(value = IcebergExpireSnapshotsHandle.class, name = "expire_snapshots"), @JsonSubTypes.Type(value = IcebergOptimizeHandle.class, name = "optimize"), + @JsonSubTypes.Type(value = IcebergOptimizeManifestsHandle.class, name = "optimize_manifests"), @JsonSubTypes.Type(value = IcebergRemoveOrphanFilesHandle.class, name = "remove_orphan_files"), @JsonSubTypes.Type(value = IcebergAddFilesHandle.class, name = "add_files"), @JsonSubTypes.Type(value = IcebergAddFilesFromTableHandle.class, name = "add_files_from_table"), diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergTableProcedureId.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergTableProcedureId.java index 6dd2a003a8fc..6230f8b779b6 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergTableProcedureId.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/IcebergTableProcedureId.java @@ -16,6 +16,7 @@ public enum IcebergTableProcedureId { OPTIMIZE, + OPTIMIZE_MANIFESTS, DROP_EXTENDED_STATS, ROLLBACK_TO_SNAPSHOT, EXPIRE_SNAPSHOTS, diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/OptimizeManifestsTableProcedure.java 
b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/OptimizeManifestsTableProcedure.java new file mode 100644 index 000000000000..62604a27fa80 --- /dev/null +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/procedure/OptimizeManifestsTableProcedure.java @@ -0,0 +1,34 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.iceberg.procedure; + +import com.google.common.collect.ImmutableList; +import com.google.inject.Provider; +import io.trino.spi.connector.TableProcedureMetadata; + +import static io.trino.plugin.iceberg.procedure.IcebergTableProcedureId.OPTIMIZE_MANIFESTS; +import static io.trino.spi.connector.TableProcedureExecutionMode.coordinatorOnly; + +public class OptimizeManifestsTableProcedure + implements Provider +{ + @Override + public TableProcedureMetadata get() + { + return new TableProcedureMetadata( + OPTIMIZE_MANIFESTS.name(), + coordinatorOnly(), + ImmutableList.of()); + } +} diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java index 251affd72387..1df245797cee 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java @@ -1694,7 +1694,7 @@ private void testRollbackSnapshot(String rollbackToSnapshotFormat) // 
extra insert which should be dropped on rollback assertUpdate("INSERT INTO test_rollback (col0, col1) VALUES (999, CAST(999 AS BIGINT))", 1); - assertUpdate(format("ALTER TABLE tpch.test_rollback EXECUTE rollback_to_snapshot(%s)", afterSecondInsertId)); + assertUpdate(format(rollbackToSnapshotFormat, afterSecondInsertId)); assertQuery("SELECT * FROM test_rollback ORDER BY col0", "VALUES (789, CAST(987 AS BIGINT))"); assertUpdate("DROP TABLE test_rollback"); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java index 8f0d77d8820c..001d49e0b331 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergSystemTables.java @@ -38,6 +38,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Function; import static com.google.common.collect.ImmutableMap.toImmutableMap; @@ -45,11 +46,14 @@ import static io.trino.plugin.iceberg.IcebergFileFormat.PARQUET; import static io.trino.plugin.iceberg.IcebergTestUtils.getFileSystemFactory; import static io.trino.plugin.iceberg.IcebergTestUtils.getHiveMetastore; +import static io.trino.plugin.iceberg.util.EqualityDeleteUtils.writeEqualityDeleteForTable; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.testing.MaterializedResult.DEFAULT_PRECISION; import static io.trino.testing.MaterializedResult.resultBuilder; import static java.util.Locale.ENGLISH; import static java.util.Objects.requireNonNull; +import static org.apache.iceberg.MetadataColumns.DELETE_FILE_PATH; +import static org.apache.iceberg.MetadataColumns.DELETE_FILE_POS; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; @@ -639,6 +643,129 @@ void 
testEntriesTable() } } + @Test + void testEntriesAfterPositionDelete() + { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_entries", "AS SELECT 1 id, DATE '2014-01-01' dt")) { + assertUpdate("DELETE FROM " + table.getName() + " WHERE id = 1", 1); + + Table icebergTable = loadTable(table.getName()); + Snapshot snapshot = icebergTable.currentSnapshot(); + long snapshotId = snapshot.snapshotId(); + long sequenceNumber = snapshot.sequenceNumber(); + + assertThat(computeScalar("SELECT status FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(1); + assertThat(computeScalar("SELECT snapshot_id FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(snapshotId); + assertThat(computeScalar("SELECT sequence_number FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(sequenceNumber); + assertThat(computeScalar("SELECT file_sequence_number FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(2L); + + MaterializedRow deleteFile = (MaterializedRow) computeScalar("SELECT data_file FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId); + assertThat(deleteFile.getFieldCount()).isEqualTo(16); + assertThat(deleteFile.getField(0)).isEqualTo(1); // content + assertThat((String) deleteFile.getField(1)).endsWith(format.toString().toLowerCase(ENGLISH)); // file_path + assertThat(deleteFile.getField(2)).isEqualTo(format.toString()); // file_format + assertThat(deleteFile.getField(3)).isEqualTo(0); // spec_id + assertThat(deleteFile.getField(4)).isEqualTo(1L); // record_count + assertThat((long) deleteFile.getField(5)).isPositive(); // file_size_in_bytes + + //noinspection unchecked + Map columnSizes = (Map) deleteFile.getField(6); + switch (format) { + case ORC -> assertThat(columnSizes).isNull(); + case PARQUET -> assertThat(columnSizes) + .hasSize(2) + .satisfies(_ -> 
assertThat(columnSizes.get(DELETE_FILE_POS.fieldId())).isPositive()) + .satisfies(_ -> assertThat(columnSizes.get(DELETE_FILE_PATH.fieldId())).isPositive()); + default -> throw new IllegalArgumentException("Unsupported format: " + format); + } + + assertThat(deleteFile.getField(7)).isEqualTo(Map.of(DELETE_FILE_POS.fieldId(), 1L, DELETE_FILE_PATH.fieldId(), 1L)); // value_counts + assertThat(deleteFile.getField(8)).isEqualTo(Map.of(DELETE_FILE_POS.fieldId(), 0L, DELETE_FILE_PATH.fieldId(), 0L)); // null_value_counts + assertThat(deleteFile.getField(9)).isEqualTo(value(Map.of(), null)); // nan_value_counts + + // lower_bounds + //noinspection unchecked + Map lowerBounds = (Map) deleteFile.getField(10); + assertThat(lowerBounds) + .hasSize(2) + .satisfies(_ -> assertThat(lowerBounds.get(DELETE_FILE_POS.fieldId())).isEqualTo("0")) + .satisfies(_ -> assertThat(lowerBounds.get(DELETE_FILE_PATH.fieldId())).contains(table.getName())); + + // upper_bounds + //noinspection unchecked + Map upperBounds = (Map) deleteFile.getField(11); + assertThat(lowerBounds) + .hasSize(2) + .satisfies(_ -> assertThat(upperBounds.get(DELETE_FILE_POS.fieldId())).isEqualTo("0")) + .satisfies(_ -> assertThat(upperBounds.get(DELETE_FILE_PATH.fieldId())).contains(table.getName())); + + assertThat(deleteFile.getField(12)).isNull(); // key_metadata + assertThat(deleteFile.getField(13)).isEqualTo(List.of(value(4L, 3L))); // split_offsets + assertThat(deleteFile.getField(14)).isNull(); // equality_ids + assertThat(deleteFile.getField(15)).isNull(); // sort_order_id + + assertThat(computeScalar("SELECT readable_metrics FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(""" + {\ + "dt":{"column_size":null,"value_count":null,"null_value_count":null,"nan_value_count":null,"lower_bound":null,"upper_bound":null},\ + "id":{"column_size":null,"value_count":null,"null_value_count":null,"nan_value_count":null,"lower_bound":null,"upper_bound":null}\ + }"""); + } + } + 
+ @Test + void testEntriesAfterEqualityDelete() + throws Exception + { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_entries", "AS SELECT 1 id, DATE '2014-01-01' dt")) { + Table icebergTable = loadTable(table.getName()); + assertThat(icebergTable.currentSnapshot().summary()).containsEntry("total-equality-deletes", "0"); + writeEqualityDeleteForTable(icebergTable, fileSystemFactory, Optional.empty(), Optional.empty(), ImmutableMap.of("id", 1), Optional.empty()); + assertThat(icebergTable.currentSnapshot().summary()).containsEntry("total-equality-deletes", "1"); + + Snapshot snapshot = icebergTable.currentSnapshot(); + long snapshotId = snapshot.snapshotId(); + long sequenceNumber = snapshot.sequenceNumber(); + + assertThat(computeScalar("SELECT status FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(1); + assertThat(computeScalar("SELECT snapshot_id FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(snapshotId); + assertThat(computeScalar("SELECT sequence_number FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(sequenceNumber); + assertThat(computeScalar("SELECT file_sequence_number FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(2L); + + MaterializedRow dataFile = (MaterializedRow) computeScalar("SELECT data_file FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId); + assertThat(dataFile.getFieldCount()).isEqualTo(16); + assertThat(dataFile.getField(0)).isEqualTo(2); // content + assertThat(dataFile.getField(3)).isEqualTo(0); // spec_id + assertThat(dataFile.getField(4)).isEqualTo(1L); // record_count + assertThat((long) dataFile.getField(5)).isPositive(); // file_size_in_bytes + assertThat(dataFile.getField(6)).isEqualTo(Map.of(1, 45L)); // column_sizes + assertThat(dataFile.getField(7)).isEqualTo(Map.of(1, 1L)); // 
value_counts + assertThat(dataFile.getField(8)).isEqualTo(Map.of(1, 0L)); // null_value_counts + assertThat(dataFile.getField(9)).isEqualTo(Map.of()); // nan_value_counts + assertThat(dataFile.getField(10)).isEqualTo(Map.of(1, "1")); // lower_bounds + assertThat(dataFile.getField(11)).isEqualTo(Map.of(1, "1")); // upper_bounds + assertThat(dataFile.getField(12)).isNull(); // key_metadata + assertThat(dataFile.getField(13)).isEqualTo(List.of(4L)); // split_offsets + assertThat(dataFile.getField(14)).isEqualTo(List.of(1)); // equality_ids + assertThat(dataFile.getField(15)).isEqualTo(0); // sort_order_id + + assertThat(computeScalar("SELECT readable_metrics FROM \"" + table.getName() + "$entries\"" + " WHERE snapshot_id = " + snapshotId)) + .isEqualTo(""" + {\ + "dt":{"column_size":null,"value_count":null,"null_value_count":null,"nan_value_count":null,"lower_bound":null,"upper_bound":null},\ + "id":{"column_size":45,"value_count":1,"null_value_count":0,"nan_value_count":null,"lower_bound":1,"upper_bound":1}\ + }"""); + } + } + @Test public void testPartitionColumns() { diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/IcebergTestUtils.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/IcebergTestUtils.java index 3078e25694f1..f5e10c1d48a9 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/IcebergTestUtils.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/IcebergTestUtils.java @@ -196,7 +196,7 @@ public static BaseTable loadTable(String tableName, false, new IcebergConfig().isHideMaterializedViewStorageTable(), directExecutor()); - return (BaseTable) loadIcebergTable(catalog, tableOperationsProvider, SESSION, new SchemaTableName(schemaName, tableName)); + return loadIcebergTable(catalog, tableOperationsProvider, SESSION, new SchemaTableName(schemaName, tableName)); } public static Map getMetadataFileAndUpdatedMillis(TrinoFileSystem trinoFileSystem, String tableLocation) diff --git 
a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergConfig.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergConfig.java index 8edfa5ee4c39..949fd1a2b7d0 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergConfig.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergConfig.java @@ -72,7 +72,7 @@ public void testDefaults() .setSortedWritingEnabled(true) .setQueryPartitionFilterRequired(false) .setQueryPartitionFilterRequiredSchemas(ImmutableSet.of()) - .setSplitManagerThreads(Runtime.getRuntime().availableProcessors() * 2) + .setSplitManagerThreads(Integer.toString(Runtime.getRuntime().availableProcessors() * 2)) .setAllowedExtraProperties(ImmutableList.of()) .setIncrementalRefreshEnabled(true) .setMetadataCacheEnabled(true) @@ -150,7 +150,7 @@ public void testExplicitPropertyMappings() .setSortedWritingEnabled(false) .setQueryPartitionFilterRequired(true) .setQueryPartitionFilterRequiredSchemas(ImmutableSet.of("bronze", "silver")) - .setSplitManagerThreads(42) + .setSplitManagerThreads("42") .setAllowedExtraProperties(ImmutableList.of("propX", "propY")) .setIncrementalRefreshEnabled(false) .setMetadataCacheEnabled(false) diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergLocalConcurrentWrites.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergLocalConcurrentWrites.java index ed3cc5313aa7..ab9b5a6f5d5c 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergLocalConcurrentWrites.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergLocalConcurrentWrites.java @@ -15,22 +15,28 @@ import com.google.common.collect.ImmutableList; import io.airlift.concurrent.MoreFutures; +import io.trino.plugin.blackhole.BlackHolePlugin; import io.trino.testing.AbstractTestQueryFramework; +import io.trino.testing.DistributedQueryRunner; import 
io.trino.testing.MaterializedResult; import io.trino.testing.QueryRunner; import io.trino.testing.sql.TestTable; import org.junit.jupiter.api.RepeatedTest; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; +import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.concurrent.Callable; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static com.google.common.base.Preconditions.checkState; +import static com.google.common.base.Verify.verify; import static com.google.common.collect.ImmutableList.toImmutableList; import static io.airlift.concurrent.MoreFutures.tryGetFutureValue; import static io.trino.testing.QueryAssertions.getTrinoExceptionCause; @@ -49,7 +55,10 @@ final class TestIcebergLocalConcurrentWrites protected QueryRunner createQueryRunner() throws Exception { - return IcebergQueryRunner.builder().build(); + DistributedQueryRunner queryRunner = IcebergQueryRunner.builder().build(); + queryRunner.installPlugin(new BlackHolePlugin()); + queryRunner.createCatalog("blackhole", "blackhole"); + return queryRunner; } // Repeat test with invocationCount for better test coverage, since the tested aspect is inherently non-deterministic. 
@@ -1044,6 +1053,97 @@ void testConcurrentUpdateWithEnforcedAndUnenforcedPartitions() } } + @Test + public void testOptimizeDuringWriteOperations() + throws Exception + { + runOptimizeDuringWriteOperations(true); + runOptimizeDuringWriteOperations(false); + } + + private void runOptimizeDuringWriteOperations(boolean useSmallFiles) + throws Exception + { + int threads = 5; + int deletionThreads = threads - 1; + int rows = 12; + int rowsPerThread = rows / deletionThreads; + + CyclicBarrier barrier = new CyclicBarrier(threads); + ExecutorService executor = newFixedThreadPool(threads); + + // Slow down the delete operations so optimize is more likely to complete + String blackholeTable = "blackhole_table_" + randomNameSuffix(); + assertUpdate("CREATE TABLE blackhole.default.%s (a INT, b INT) WITH (split_count = 1, pages_per_split = 1, rows_per_page = 1, page_processing_delay = '3s')".formatted(blackholeTable)); + + try (TestTable table = newTrinoTable( + "test_optimize_during_write_operations", + "(int_col INT)")) { + String tableName = table.getName(); + + // Testing both situations where a file is fully removed by the delete operation and when a row level delete is required. 
+ if (useSmallFiles) { + for (int i = 0; i < rows; i++) { + assertUpdate(format("INSERT INTO %s VALUES %s", tableName, i), 1); + } + } + else { + String values = IntStream.range(0, rows).mapToObj(String::valueOf).collect(Collectors.joining(", ")); + assertUpdate(format("INSERT INTO %s VALUES %s", tableName, values), rows); + } + + List>> deletionFutures = IntStream.range(0, deletionThreads) + .mapToObj(threadNumber -> executor.submit(() -> { + barrier.await(10, SECONDS); + List successfulDeletes = new ArrayList<>(); + for (int i = 0; i < rowsPerThread; i++) { + try { + int rowNumber = threadNumber * rowsPerThread + i; + getQueryRunner().execute(format("DELETE FROM %s WHERE int_col = %s OR ((SELECT count(*) FROM blackhole.default.%s) > 42)", tableName, rowNumber, blackholeTable)); + successfulDeletes.add(true); + } + catch (RuntimeException e) { + successfulDeletes.add(false); + } + } + return successfulDeletes; + })) + .collect(toImmutableList()); + + Future optimizeFuture = executor.submit(() -> { + try { + barrier.await(10, SECONDS); + // Allow for some deletes to start before running optimize + Thread.sleep(50); + assertUpdate("ALTER TABLE %s EXECUTE optimize".formatted(tableName)); + } + catch (Exception e) { + throw new RuntimeException(e); + } + }); + + List expectedValues = new ArrayList<>(); + for (int threadNumber = 0; threadNumber < deletionThreads; threadNumber++) { + List deleteOutcomes = deletionFutures.get(threadNumber).get(); + verify(deleteOutcomes.size() == rowsPerThread); + for (int rowNumber = 0; rowNumber < rowsPerThread; rowNumber++) { + boolean successfulDelete = deleteOutcomes.get(rowNumber); + if (!successfulDelete) { + expectedValues.add(String.valueOf(threadNumber * rowsPerThread + rowNumber)); + } + } + } + + optimizeFuture.get(); + assertThat(expectedValues.size()).isGreaterThan(0).isLessThan(rows); + assertQuery("SELECT * FROM " + tableName, "VALUES " + String.join(", ", expectedValues)); + } + finally { + executor.shutdownNow(); + 
executor.awaitTermination(10, SECONDS); + } + } + private long getCurrentSnapshotId(String tableName) { return (long) computeScalar("SELECT snapshot_id FROM \"" + tableName + "$snapshots\" ORDER BY committed_at DESC FETCH FIRST 1 ROW WITH TIES"); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java index aaa156799aa2..98c3005e4a5e 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java @@ -27,8 +27,6 @@ import io.trino.metastore.HiveType; import io.trino.metastore.PrincipalPrivileges; import io.trino.metastore.Storage; -import io.trino.plugin.base.util.Closables; -import io.trino.plugin.blackhole.BlackHolePlugin; import io.trino.plugin.hive.HiveStorageFormat; import io.trino.plugin.hive.TestingHivePlugin; import io.trino.plugin.iceberg.fileio.ForwardingFileIo; @@ -77,22 +75,17 @@ import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.UUID; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.IntStream; -import static com.google.common.base.Verify.verify; +import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.collect.ImmutableList.toImmutableList; import static com.google.common.collect.ImmutableSet.toImmutableSet; import static com.google.common.collect.Iterables.getOnlyElement; @@ -111,8 +104,8 @@ import static java.nio.ByteOrder.LITTLE_ENDIAN; import static java.nio.charset.StandardCharsets.UTF_8; 
import static java.util.Map.entry; -import static java.util.concurrent.Executors.newFixedThreadPool; -import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.iceberg.FileContent.EQUALITY_DELETES; +import static org.apache.iceberg.FileContent.POSITION_DELETES; import static org.apache.iceberg.FileFormat.ORC; import static org.apache.iceberg.FileFormat.PARQUET; import static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING; @@ -145,15 +138,6 @@ protected QueryRunner createQueryRunner() .put("hive.security", "allow-all") .buildOrThrow()); - try { - queryRunner.installPlugin(new BlackHolePlugin()); - queryRunner.createCatalog("blackhole", "blackhole"); - } - catch (RuntimeException e) { - Closables.closeAllSuppress(e, queryRunner); - throw e; - } - return queryRunner; } @@ -279,7 +263,7 @@ public void testV2TableWithEqualityDelete() assertUpdate("INSERT INTO " + tableName + " SELECT * FROM tpch.tiny.nation", 25); writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {2L})), ImmutableMap.of("regionkey", 2L)); // the equality delete file is applied to 2 data files - assertQuery("SELECT count(*) FROM \"" + tableName + "$files\" WHERE content = " + FileContent.EQUALITY_DELETES.id(), "VALUES 2"); + assertQuery("SELECT count(*) FROM \"" + tableName + "$files\" WHERE content = " + EQUALITY_DELETES.id(), "VALUES 2"); } @Test @@ -672,93 +656,27 @@ public void testOptimizingPartitionsOfV2TableWithGlobalEqualityDeleteFile() } @Test - public void testOptimizeDuringWriteOperations() - throws Exception - { - runOptimizeDuringWriteOperations(true); - runOptimizeDuringWriteOperations(false); - } - - private void runOptimizeDuringWriteOperations(boolean useSmallFiles) + public void testOptimizingWholeTableRemovesDeleteFiles() throws Exception { - int threads = 5; - int deletionThreads = threads - 1; - int rows = 12; - int rowsPerThread = rows / deletionThreads; + try (TestTable testTable = 
newTrinoTable("test_optimize_removes_obsolete_delete_files_", "AS SELECT * FROM tpch.tiny.nation")) { + assertUpdate("DELETE FROM " + testTable.getName() + " WHERE regionkey % 2 = 0", 15); + Table icebergTable = loadTable(testTable.getName()); + writeEqualityDeleteToNationTable(icebergTable, Optional.of(icebergTable.spec()), Optional.of(new PartitionData(new Long[] {1L}))); - CyclicBarrier barrier = new CyclicBarrier(threads); - ExecutorService executor = newFixedThreadPool(threads); + assertThat(query("SELECT * FROM " + testTable.getName())) + .matches("SELECT * FROM nation WHERE regionkey != 1 AND regionkey % 2 = 1"); - // Slow down the delete operations so optimize is more likely to complete - String blackholeTable = "blackhole_table_" + randomNameSuffix(); - assertUpdate("CREATE TABLE blackhole.default.%s (a INT, b INT) WITH (split_count = 1, pages_per_split = 1, rows_per_page = 1, page_processing_delay = '3s')".formatted(blackholeTable)); + assertQuery("SELECT count(*) FROM \"" + testTable.getName() + "$files\" WHERE content = " + POSITION_DELETES.id(), "VALUES 1"); + assertQuery("SELECT count(*) FROM \"" + testTable.getName() + "$files\" WHERE content = " + EQUALITY_DELETES.id(), "VALUES 1"); - try (TestTable table = newTrinoTable( - "test_optimize_during_write_operations", - "(int_col INT)")) { - String tableName = table.getName(); - - // Testing both situations where a file is fully removed by the delete operation and when a row level delete is required. 
- if (useSmallFiles) { - for (int i = 0; i < rows; i++) { - assertUpdate(format("INSERT INTO %s VALUES %s", tableName, i), 1); - } - } - else { - String values = IntStream.range(0, rows).mapToObj(String::valueOf).collect(Collectors.joining(", ")); - assertUpdate(format("INSERT INTO %s VALUES %s", tableName, values), rows); - } + assertQuerySucceeds("ALTER TABLE " + testTable.getName() + " EXECUTE OPTIMIZE"); - List>> deletionFutures = IntStream.range(0, deletionThreads) - .mapToObj(threadNumber -> executor.submit(() -> { - barrier.await(10, SECONDS); - List successfulDeletes = new ArrayList<>(); - for (int i = 0; i < rowsPerThread; i++) { - try { - int rowNumber = threadNumber * rowsPerThread + i; - getQueryRunner().execute(format("DELETE FROM %s WHERE int_col = %s OR ((SELECT count(*) FROM blackhole.default.%s) > 42)", tableName, rowNumber, blackholeTable)); - successfulDeletes.add(true); - } - catch (RuntimeException e) { - successfulDeletes.add(false); - } - } - return successfulDeletes; - })) - .collect(toImmutableList()); - - Future optimizeFuture = executor.submit(() -> { - try { - barrier.await(10, SECONDS); - // Allow for some deletes to start before running optimize - Thread.sleep(50); - assertUpdate("ALTER TABLE %s EXECUTE optimize".formatted(tableName)); - } - catch (Exception e) { - throw new RuntimeException(e); - } - }); - - List expectedValues = new ArrayList<>(); - for (int threadNumber = 0; threadNumber < deletionThreads; threadNumber++) { - List deleteOutcomes = deletionFutures.get(threadNumber).get(); - verify(deleteOutcomes.size() == rowsPerThread); - for (int rowNumber = 0; rowNumber < rowsPerThread; rowNumber++) { - boolean successfulDelete = deleteOutcomes.get(rowNumber); - if (!successfulDelete) { - expectedValues.add(String.valueOf(threadNumber * rowsPerThread + rowNumber)); - } - } - } + assertQuery("SELECT count(*) FROM \"" + testTable.getName() + "$files\" WHERE content = " + POSITION_DELETES.id(), "VALUES 0"); + assertQuery("SELECT 
count(*) FROM \"" + testTable.getName() + "$files\" WHERE content = " + EQUALITY_DELETES.id(), "VALUES 0"); - optimizeFuture.get(); - assertThat(expectedValues.size()).isGreaterThan(0).isLessThan(rows); - assertQuery("SELECT * FROM " + tableName, "VALUES " + String.join(", ", expectedValues)); - } - finally { - executor.shutdownNow(); - executor.awaitTermination(10, SECONDS); + assertThat(query("SELECT * FROM " + testTable.getName())) + .matches("SELECT * FROM nation WHERE regionkey != 1 AND regionkey % 2 = 1"); } } @@ -1298,7 +1216,7 @@ public void testNestedFieldPartitioning() assertThat(loadTable(tableName).newScan().planFiles()).hasSize(4); assertUpdate("ALTER TABLE " + tableName + " SET PROPERTIES partitioning = ARRAY['\"state.name\"', '\"district.name\"']"); - Table icebergTable = updateTableToV2(tableName); + Table icebergTable = loadTable(tableName); assertThat(icebergTable.spec().fields().stream().map(PartitionField::name).toList()) .containsExactlyInAnyOrder("state.name", "district.name"); @@ -1356,7 +1274,7 @@ public void testHighlyNestedFieldPartitioning() assertThat(loadTable(tableName).newScan().planFiles()).hasSize(2); assertUpdate("ALTER TABLE " + tableName + " SET PROPERTIES partitioning = ARRAY['\"country.state.district.name\"', '\"country.state.name\"']"); - Table icebergTable = updateTableToV2(tableName); + Table icebergTable = loadTable(tableName); assertThat(icebergTable.spec().fields().stream().map(PartitionField::name).toList()) .containsExactlyInAnyOrder("country.state.district.name", "country.state.name"); @@ -1647,6 +1565,7 @@ private Table updateTableToV2(String tableName) BaseTable table = loadTable(tableName); TableOperations operations = table.operations(); TableMetadata currentMetadata = operations.current(); + checkArgument(currentMetadata.formatVersion() != 2, "Format version is already 2: '%s'", tableName); operations.commit(currentMetadata, currentMetadata.upgradeToFormatVersion(2)); return table; diff --git 
a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogConnectorSmokeTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogConnectorSmokeTest.java index 0ccb0d3d4b4e..6d700474b7b7 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogConnectorSmokeTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogConnectorSmokeTest.java @@ -106,7 +106,7 @@ protected String getMetadataLocation(String tableName) { TrinoCatalogFactory catalogFactory = ((IcebergConnector) getQueryRunner().getCoordinator().getConnector("iceberg")).getInjector().getInstance(TrinoCatalogFactory.class); TrinoCatalog trinoCatalog = catalogFactory.create(getSession().getIdentity().toConnectorIdentity()); - BaseTable table = (BaseTable) trinoCatalog.loadTable(getSession().toConnectorSession(), new SchemaTableName(getSession().getSchema().orElseThrow(), tableName)); + BaseTable table = trinoCatalog.loadTable(getSession().toConnectorSession(), new SchemaTableName(getSession().getSchema().orElseThrow(), tableName)); return table.operations().current().metadataFileLocation(); } @@ -115,7 +115,7 @@ protected String getTableLocation(String tableName) { TrinoCatalogFactory catalogFactory = ((IcebergConnector) getQueryRunner().getCoordinator().getConnector("iceberg")).getInjector().getInstance(TrinoCatalogFactory.class); TrinoCatalog trinoCatalog = catalogFactory.create(getSession().getIdentity().toConnectorIdentity()); - BaseTable table = (BaseTable) trinoCatalog.loadTable(getSession().toConnectorSession(), new SchemaTableName(getSession().getSchema().orElseThrow(), tableName)); + BaseTable table = trinoCatalog.loadTable(getSession().toConnectorSession(), new SchemaTableName(getSession().getSchema().orElseThrow(), tableName)); return table.operations().current().location(); } diff --git 
a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogCaseInsensitiveMapping.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogCaseInsensitiveMapping.java similarity index 80% rename from plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogCaseInsensitiveMapping.java rename to plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogCaseInsensitiveMapping.java index b84c0401a1b8..cbe93d6030f3 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergPolarisCatalogCaseInsensitiveMapping.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogCaseInsensitiveMapping.java @@ -13,17 +13,15 @@ */ package io.trino.plugin.iceberg.catalog.rest; -import com.google.common.collect.ImmutableMap; +import io.airlift.http.server.testing.TestingHttpServer; import io.trino.plugin.iceberg.IcebergQueryRunner; import io.trino.testing.AbstractTestQueryFramework; import io.trino.testing.QueryRunner; -import org.apache.iceberg.CatalogProperties; import org.apache.iceberg.Schema; import org.apache.iceberg.catalog.Namespace; -import org.apache.iceberg.catalog.SessionCatalog.SessionContext; import org.apache.iceberg.catalog.TableIdentifier; -import org.apache.iceberg.rest.HTTPClient; -import org.apache.iceberg.rest.RESTSessionCatalog; +import org.apache.iceberg.jdbc.JdbcCatalog; +import org.apache.iceberg.rest.DelegatingRestSessionCatalog; import org.apache.iceberg.types.Types; import org.apache.iceberg.view.ViewBuilder; import org.assertj.core.util.Files; @@ -41,59 +39,48 @@ import java.util.Optional; import java.util.Set; +import static com.google.common.io.MoreFiles.deleteRecursively; +import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE; import static 
io.trino.plugin.iceberg.IcebergSchemaProperties.LOCATION_PROPERTY; -import static io.trino.plugin.iceberg.catalog.rest.TestingPolarisCatalog.WAREHOUSE; +import static io.trino.plugin.iceberg.catalog.rest.RestCatalogTestUtils.backendCatalog; import static io.trino.testing.TestingNames.randomNameSuffix; import static java.nio.file.Files.createDirectories; import static java.util.Locale.ENGLISH; -import static org.apache.iceberg.CatalogProperties.WAREHOUSE_LOCATION; -import static org.apache.iceberg.rest.auth.OAuth2Properties.CREDENTIAL; -import static org.apache.iceberg.rest.auth.OAuth2Properties.SCOPE; import static org.apache.iceberg.types.Types.NestedField.required; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; @TestInstance(PER_CLASS) -final class TestIcebergPolarisCatalogCaseInsensitiveMapping +final class TestIcebergRestCatalogCaseInsensitiveMapping extends AbstractTestQueryFramework { - private static final SessionContext SESSION_CONTEXT = new SessionContext("dummy", null, null, ImmutableMap.of(), null); private static final String SCHEMA = "LeVeL1_" + randomNameSuffix(); private static final String LOWERCASE_SCHEMA = SCHEMA.toLowerCase(ENGLISH); private static final Namespace NAMESPACE = Namespace.of(SCHEMA); - private RESTSessionCatalog icebergCatalog; + private JdbcCatalog backend; @Override protected QueryRunner createQueryRunner() throws Exception { File warehouseLocation = Files.newTemporaryFolder(); - TestingPolarisCatalog polarisCatalog = closeAfterClass(new TestingPolarisCatalog(warehouseLocation.getPath())); + closeAfterClass(() -> deleteRecursively(warehouseLocation.toPath(), ALLOW_INSECURE)); - Map properties = ImmutableMap.builder() - .put(CatalogProperties.URI, polarisCatalog.restUri() + "/api/catalog") - .put(WAREHOUSE_LOCATION, WAREHOUSE) - .put(CREDENTIAL, polarisCatalog.oauth2Credentials()) - .put(SCOPE, "PRINCIPAL_ROLE:ALL") - .put("view-endpoints-supported", 
"true") - .buildOrThrow(); + backend = closeAfterClass((JdbcCatalog) backendCatalog(warehouseLocation)); - RESTSessionCatalog icebergCatalogInstance = new RESTSessionCatalog( - config -> HTTPClient.builder(config).uri(config.get(CatalogProperties.URI)).build(), null); - icebergCatalogInstance.initialize("test_catalog", properties); + DelegatingRestSessionCatalog delegatingCatalog = DelegatingRestSessionCatalog.builder() + .delegate(backend) + .build(); - icebergCatalog = icebergCatalogInstance; - closeAfterClass(icebergCatalog); + TestingHttpServer testServer = delegatingCatalog.testServer(); + testServer.start(); + closeAfterClass(testServer::stop); return IcebergQueryRunner.builder(LOWERCASE_SCHEMA) .setBaseDataDir(Optional.of(warehouseLocation.toPath())) .addIcebergProperty("iceberg.catalog.type", "rest") - .addIcebergProperty("iceberg.rest-catalog.uri", polarisCatalog.restUri() + "/api/catalog") - .addIcebergProperty("iceberg.rest-catalog.warehouse", WAREHOUSE) - .addIcebergProperty("iceberg.rest-catalog.security", "OAUTH2") - .addIcebergProperty("iceberg.rest-catalog.oauth2.credential", polarisCatalog.oauth2Credentials()) - .addIcebergProperty("iceberg.rest-catalog.oauth2.scope", "PRINCIPAL_ROLE:ALL") + .addIcebergProperty("iceberg.rest-catalog.uri", testServer.getBaseUrl().toString()) .addIcebergProperty("iceberg.rest-catalog.case-insensitive-name-matching", "true") .addIcebergProperty("iceberg.register-table-procedure.enabled", "true") .build(); @@ -102,7 +89,7 @@ protected QueryRunner createQueryRunner() @BeforeAll void setup() { - icebergCatalog.createNamespace(SESSION_CONTEXT, NAMESPACE); + backend.createNamespace(NAMESPACE); assertThat(computeActual("SHOW SCHEMAS").getOnlyColumnAsSet()) .containsExactlyInAnyOrder( "information_schema", @@ -125,7 +112,7 @@ void setup() @Test void testCaseInsensitiveMatchingForTable() { - Map namespaceMetadata = icebergCatalog.loadNamespaceMetadata(SESSION_CONTEXT, NAMESPACE); + Map namespaceMetadata = 
backend.loadNamespaceMetadata(NAMESPACE); String namespaceLocation = namespaceMetadata.get(LOCATION_PROPERTY); createDir(namespaceLocation); @@ -143,8 +130,8 @@ void testCaseInsensitiveMatchingForTable() createDir(table2Location); createDir(table2Location + "/data"); createDir(table2Location + "/metadata"); - icebergCatalog - .buildTable(SESSION_CONTEXT, TableIdentifier.of(NAMESPACE, tableName2), new Schema(required(1, "x", Types.LongType.get()))) + backend + .buildTable(TableIdentifier.of(NAMESPACE, tableName2), new Schema(required(1, "x", Types.LongType.get()))) .withLocation(table2Location) .createTransaction() .commitTransaction(); @@ -152,7 +139,7 @@ void testCaseInsensitiveMatchingForTable() assertQuery("SELECT * FROM " + tableName2, "VALUES (78)"); // Test register/unregister table. Re-register for further testing. - assertThat(icebergCatalog.dropTable(SESSION_CONTEXT, TableIdentifier.of(NAMESPACE, lowercaseTableName1))).isTrue(); + assertThat(backend.dropTable(TableIdentifier.of(NAMESPACE, lowercaseTableName1), false)).isTrue(); assertQueryFails("SELECT * FROM " + tableName1, ".*'iceberg.%s.%s' does not exist".formatted(LOWERCASE_SCHEMA, lowercaseTableName1)); assertUpdate("CALL system.register_table (CURRENT_SCHEMA, '" + tableName1 + "', '" + table1Location + "')"); assertQuery("SELECT * FROM " + tableName1, "VALUES (42, -38.5)"); @@ -196,7 +183,7 @@ void testCaseInsensitiveMatchingForTable() @Test void testCaseInsensitiveMatchingForView() { - Map namespaceMetadata = icebergCatalog.loadNamespaceMetadata(SESSION_CONTEXT, NAMESPACE); + Map namespaceMetadata = backend.loadNamespaceMetadata(NAMESPACE); String namespaceLocation = namespaceMetadata.get(LOCATION_PROPERTY); createDir(namespaceLocation); @@ -213,7 +200,7 @@ void testCaseInsensitiveMatchingForView() createDir(view2Location); createDir(view2Location + "/data"); createDir(view2Location + "/metadata"); - ViewBuilder viewBuilder = icebergCatalog.buildView(SESSION_CONTEXT, TableIdentifier.of(NAMESPACE, 
viewName2)); + ViewBuilder viewBuilder = backend.buildView(TableIdentifier.of(NAMESPACE, viewName2)); viewBuilder .withQuery("trino", "SELECT BIGINT '34' y") .withSchema(new Schema(required(1, "y", Types.LongType.get()))) diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogConfig.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogConfig.java index 010e2878f194..a06cd2940a21 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogConfig.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogConfig.java @@ -37,6 +37,8 @@ public void testDefaults() .setSessionType(IcebergRestCatalogConfig.SessionType.NONE) .setSecurity(IcebergRestCatalogConfig.Security.NONE) .setVendedCredentialsEnabled(false) + .setViewEndpointsEnabled(true) + .setSigV4Enabled(false) .setCaseInsensitiveNameMatching(false) .setCaseInsensitiveNameMatchingCacheTtl(new Duration(1, MINUTES))); } @@ -52,6 +54,8 @@ public void testExplicitPropertyMappings() .put("iceberg.rest-catalog.security", "OAUTH2") .put("iceberg.rest-catalog.session", "USER") .put("iceberg.rest-catalog.vended-credentials-enabled", "true") + .put("iceberg.rest-catalog.view-endpoints-enabled", "false") + .put("iceberg.rest-catalog.sigv4-enabled", "true") .put("iceberg.rest-catalog.case-insensitive-name-matching", "true") .put("iceberg.rest-catalog.case-insensitive-name-matching.cache-ttl", "3m") .buildOrThrow(); @@ -64,6 +68,8 @@ public void testExplicitPropertyMappings() .setSessionType(IcebergRestCatalogConfig.SessionType.USER) .setSecurity(IcebergRestCatalogConfig.Security.OAUTH2) .setVendedCredentialsEnabled(true) + .setViewEndpointsEnabled(false) + .setSigV4Enabled(true) .setCaseInsensitiveNameMatching(true) .setCaseInsensitiveNameMatchingCacheTtl(new Duration(3, MINUTES)); diff --git 
a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogSigV4Config.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogSigV4Config.java new file mode 100644 index 000000000000..2ea5254c5147 --- /dev/null +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergRestCatalogSigV4Config.java @@ -0,0 +1,46 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.iceberg.catalog.rest; + +import com.google.common.collect.ImmutableMap; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +import static io.airlift.configuration.testing.ConfigAssertions.assertFullMapping; +import static io.airlift.configuration.testing.ConfigAssertions.assertRecordedDefaults; +import static io.airlift.configuration.testing.ConfigAssertions.recordDefaults; + +final class TestIcebergRestCatalogSigV4Config +{ + @Test + void testDefaults() + { + assertRecordedDefaults(recordDefaults(IcebergRestCatalogSigV4Config.class) + .setSigningName("execute-api")); + } + + @Test + void testExplicitPropertyMappings() + { + Map properties = ImmutableMap.builder() + .put("iceberg.rest-catalog.signing-name", "glue") + .buildOrThrow(); + + IcebergRestCatalogSigV4Config expected = new IcebergRestCatalogSigV4Config() + .setSigningName("glue"); + + assertFullMapping(properties, expected); + } +} diff --git 
a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergS3TablesConnectorSmokeTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergS3TablesConnectorSmokeTest.java new file mode 100644 index 000000000000..071be8897d83 --- /dev/null +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergS3TablesConnectorSmokeTest.java @@ -0,0 +1,537 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.iceberg.catalog.rest; + +import io.trino.filesystem.Location; +import io.trino.plugin.iceberg.BaseIcebergConnectorSmokeTest; +import io.trino.plugin.iceberg.IcebergConfig; +import io.trino.plugin.iceberg.IcebergQueryRunner; +import io.trino.testing.QueryRunner; +import io.trino.testing.TestingConnectorBehavior; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; + +import static io.trino.testing.SystemEnvironmentUtils.requireEnv; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; + +@TestInstance(PER_CLASS) +final class TestIcebergS3TablesConnectorSmokeTest + extends BaseIcebergConnectorSmokeTest +{ + public static final String S3_TABLES_BUCKET = requireEnv("S3_TABLES_BUCKET"); + public static final String AWS_ACCESS_KEY_ID = requireEnv("AWS_ACCESS_KEY_ID"); + public static final String AWS_SECRET_ACCESS_KEY = requireEnv("AWS_SECRET_ACCESS_KEY"); + public static final String AWS_REGION = requireEnv("AWS_REGION"); + + public TestIcebergS3TablesConnectorSmokeTest() + { + super(new IcebergConfig().getFileFormat().toIceberg()); + } + + @Override + protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) + { + return switch (connectorBehavior) { + case SUPPORTS_CREATE_MATERIALIZED_VIEW, + SUPPORTS_RENAME_MATERIALIZED_VIEW, + SUPPORTS_RENAME_SCHEMA -> false; + default -> super.hasBehavior(connectorBehavior); + }; + } + + @Override + protected QueryRunner createQueryRunner() + throws Exception + { + return IcebergQueryRunner.builder("tpch") + .addIcebergProperty("iceberg.security", "read_only") + .addIcebergProperty("iceberg.file-format", format.name()) + .addIcebergProperty("iceberg.catalog.type", "rest") + .addIcebergProperty("iceberg.rest-catalog.uri", "https://glue.%s.amazonaws.com/iceberg".formatted(AWS_REGION)) + 
.addIcebergProperty("iceberg.rest-catalog.warehouse", "s3tablescatalog/" + S3_TABLES_BUCKET) + .addIcebergProperty("iceberg.rest-catalog.view-endpoints-enabled", "false") + .addIcebergProperty("iceberg.rest-catalog.sigv4-enabled", "true") + .addIcebergProperty("iceberg.rest-catalog.signing-name", "glue") + .addIcebergProperty("fs.hadoop.enabled", "false") + .addIcebergProperty("fs.native-s3.enabled", "true") + .addIcebergProperty("s3.region", AWS_REGION) + .addIcebergProperty("s3.aws-access-key", AWS_ACCESS_KEY_ID) + .addIcebergProperty("s3.aws-secret-key", AWS_SECRET_ACCESS_KEY) + .disableSchemaInitializer() + .build(); + } + + @Override + protected void dropTableFromMetastore(String tableName) + { + throw new UnsupportedOperationException(); + } + + @Override + protected String getMetadataLocation(String tableName) + { + throw new UnsupportedOperationException(); + } + + @Override + protected String schemaPath() + { + return "dummy"; + } + + @Override + protected boolean locationExists(String location) + { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean isFileSorted(Location path, String sortColumnName) + { + throw new UnsupportedOperationException(); + } + + @Override + protected void deleteDirectory(String location) + { + throw new UnsupportedOperationException(); + } + + @Test + @Override // Override because the location pattern differs + public void testShowCreateTable() + { + assertThat((String) computeScalar("SHOW CREATE TABLE region")) + .matches("CREATE TABLE iceberg.tpch.region \\(\n" + + " regionkey bigint,\n" + + " name varchar,\n" + + " comment varchar\n" + + "\\)\n" + + "WITH \\(\n" + + " format = 'PARQUET',\n" + + " format_version = 2,\n" + + " location = 's3://.*--table-s3'\n" + + "\\)"); + } + + @Test + @Override + public void testView() + { + assertThatThrownBy(super::testView) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testCommentView() + { + 
assertThatThrownBy(super::testCommentView) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testCommentViewColumn() + { + assertThatThrownBy(super::testCommentViewColumn) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testMaterializedView() + { + assertThatThrownBy(super::testMaterializedView) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testRenameSchema() + { + assertThatThrownBy(super::testRenameSchema) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testRenameTable() + { + assertThatThrownBy(super::testRenameTable) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testRenameTableAcrossSchemas() + { + assertThatThrownBy(super::testRenameTableAcrossSchemas) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testCreateTable() + { + assertThatThrownBy(super::testCreateTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateTableAsSelect() + { + assertThatThrownBy(super::testCreateTableAsSelect) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testUpdate() + { + assertThatThrownBy(super::testUpdate) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testInsert() + { + assertThatThrownBy(super::testInsert) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testHiddenPathColumn() + { + assertThatThrownBy(super::testHiddenPathColumn) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRowLevelDelete() + { + assertThatThrownBy(super::testRowLevelDelete) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testDeleteAllDataFromTable() + { + assertThatThrownBy(super::testDeleteAllDataFromTable) + .hasMessageContaining("Access Denied"); + } + + @Test + 
@Override + public void testDeleteRowsConcurrently() + { + assertThatThrownBy(super::testDeleteRowsConcurrently) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateOrReplaceTable() + { + assertThatThrownBy(super::testCreateOrReplaceTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateOrReplaceTableChangeColumnNamesAndTypes() + { + assertThatThrownBy(super::testCreateOrReplaceTableChangeColumnNamesAndTypes) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithTableLocation() + { + assertThatThrownBy(super::testRegisterTableWithTableLocation) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithComments() + { + assertThatThrownBy(super::testRegisterTableWithComments) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRowLevelUpdate() + { + assertThatThrownBy(super::testRowLevelUpdate) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testMerge() + { + assertThatThrownBy(super::testMerge) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateSchema() + { + assertThatThrownBy(super::testCreateSchema) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateSchemaWithNonLowercaseOwnerName() + { + assertThatThrownBy(super::testCreateSchemaWithNonLowercaseOwnerName) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithShowCreateTable() + { + assertThatThrownBy(super::testRegisterTableWithShowCreateTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithReInsert() + { + assertThatThrownBy(super::testRegisterTableWithReInsert) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithDroppedTable() + { + 
assertThatThrownBy(super::testRegisterTableWithDroppedTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithDifferentTableName() + { + assertThatThrownBy(super::testRegisterTableWithDifferentTableName) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithMetadataFile() + { + assertThatThrownBy(super::testRegisterTableWithMetadataFile) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateTableWithTrailingSpaceInLocation() + { + assertThatThrownBy(super::testCreateTableWithTrailingSpaceInLocation) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testRegisterTableWithTrailingSpaceInLocation() + { + assertThatThrownBy(super::testRegisterTableWithTrailingSpaceInLocation) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testUnregisterTable() + { + assertThatThrownBy(super::testUnregisterTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testUnregisterBrokenTable() + { + assertThatThrownBy(super::testUnregisterBrokenTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testUnregisterTableNotExistingTable() + { + assertThatThrownBy(super::testUnregisterTableNotExistingTable) + .hasStackTraceContaining("Table .* not found"); + } + + @Test + @Override + public void testUnregisterTableNotExistingSchema() + { + assertThatThrownBy(super::testUnregisterTableNotExistingSchema) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testRepeatUnregisterTable() + { + assertThatThrownBy(super::testRepeatUnregisterTable) + .hasStackTraceContaining("Table .* not found"); + } + + @Test + @Override + public void testUnregisterTableAccessControl() + { + assertThatThrownBy(super::testUnregisterTableAccessControl) + .hasMessageContaining("Access Denied"); + } + + @Test + 
@Override + public void testCreateTableWithNonExistingSchemaVerifyLocation() + { + assertThatThrownBy(super::testCreateTableWithNonExistingSchemaVerifyLocation) + .hasStackTraceContaining("Access Denied"); + } + + @Test + @Override + public void testSortedNationTable() + { + assertThatThrownBy(super::testSortedNationTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testFileSortingWithLargerTable() + { + assertThatThrownBy(super::testFileSortingWithLargerTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testDropTableWithMissingMetadataFile() + { + assertThatThrownBy(super::testDropTableWithMissingMetadataFile) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testDropTableWithMissingSnapshotFile() + { + assertThatThrownBy(super::testDropTableWithMissingSnapshotFile) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testDropTableWithMissingManifestListFile() + { + assertThatThrownBy(super::testDropTableWithMissingManifestListFile) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testDropTableWithMissingDataFile() + { + assertThatThrownBy(super::testDropTableWithMissingDataFile) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testDropTableWithNonExistentTableLocation() + { + assertThatThrownBy(super::testDropTableWithNonExistentTableLocation) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testMetadataTables() + { + assertThatThrownBy(super::testMetadataTables) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testPartitionFilterRequired() + { + assertThatThrownBy(super::testPartitionFilterRequired) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testTableChangesFunction() + { + assertThatThrownBy(super::testTableChangesFunction) + .hasMessageContaining("Access 
Denied"); + } + + @Test + @Override + public void testRowLevelDeletesWithTableChangesFunction() + { + assertThatThrownBy(super::testRowLevelDeletesWithTableChangesFunction) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testCreateOrReplaceWithTableChangesFunction() + { + assertThatThrownBy(super::testCreateOrReplaceWithTableChangesFunction) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testTruncateTable() + { + assertThatThrownBy(super::testTruncateTable) + .hasMessageContaining("Access Denied"); + } + + @Test + @Override + public void testMetadataDeleteAfterCommitEnabled() + { + assertThatThrownBy(super::testMetadataDeleteAfterCommitEnabled) + .hasStackTraceContaining("Access Denied"); + } +} diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/containers/NessieContainer.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/containers/NessieContainer.java index 9ade575cbc37..2cd5ded54071 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/containers/NessieContainer.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/containers/NessieContainer.java @@ -28,7 +28,7 @@ public class NessieContainer { private static final Logger log = Logger.get(NessieContainer.class); - public static final String DEFAULT_IMAGE = "ghcr.io/projectnessie/nessie:0.102.2"; + public static final String DEFAULT_IMAGE = "ghcr.io/projectnessie/nessie:0.102.5"; public static final String DEFAULT_HOST_NAME = "nessie"; public static final String VERSION_STORE_TYPE = "IN_MEMORY"; diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/procedure/TestIcebergOptimizeManifestsProcedure.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/procedure/TestIcebergOptimizeManifestsProcedure.java new file mode 100644 index 000000000000..3708a2bd76bc --- /dev/null +++ 
b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/procedure/TestIcebergOptimizeManifestsProcedure.java @@ -0,0 +1,127 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.iceberg.procedure; + +import io.trino.plugin.iceberg.IcebergQueryRunner; +import io.trino.testing.AbstractTestQueryFramework; +import io.trino.testing.QueryRunner; +import io.trino.testing.sql.TestTable; +import org.junit.jupiter.api.Test; + +import java.util.Set; + +import static com.google.common.collect.ImmutableSet.toImmutableSet; +import static org.assertj.core.api.Assertions.assertThat; + +final class TestIcebergOptimizeManifestsProcedure + extends AbstractTestQueryFramework +{ + @Override + protected QueryRunner createQueryRunner() + throws Exception + { + return IcebergQueryRunner.builder().build(); + } + + @Test + void testOptimizeManifests() + { + try (TestTable table = newTrinoTable("test_optimize_manifests", "(x int)")) { + assertUpdate("INSERT INTO " + table.getName() + " VALUES 1", 1); + assertUpdate("INSERT INTO " + table.getName() + " VALUES 2", 1); + + Set manifestFiles = manifestFiles(table.getName()); + assertThat(manifestFiles).hasSize(2); + + assertUpdate("ALTER TABLE " + table.getName() + " EXECUTE optimize_manifests"); + assertThat(manifestFiles(table.getName())) + .hasSize(1) + .doesNotContainAnyElementsOf(manifestFiles); + + assertThat(query("SELECT * FROM " + table.getName())) + .matches("VALUES 1, 2"); + } + } + + @Test + 
void testPartitionTable() + { + try (TestTable table = newTrinoTable("test_partition", "(id int, part int) WITH (partitioning = ARRAY['part'])")) { + assertUpdate("INSERT INTO " + table.getName() + " VALUES (1, 10)", 1); + assertUpdate("INSERT INTO " + table.getName() + " VALUES (2, 10)", 1); + assertUpdate("INSERT INTO " + table.getName() + " VALUES (3, 20)", 1); + assertUpdate("INSERT INTO " + table.getName() + " VALUES (4, 20)", 1); + + Set manifestFiles = manifestFiles(table.getName()); + assertThat(manifestFiles).hasSize(4); + + assertUpdate("ALTER TABLE " + table.getName() + " EXECUTE optimize_manifests"); + assertThat(manifestFiles(table.getName())) + .hasSize(1) + .doesNotContainAnyElementsOf(manifestFiles); + + assertThat(query("SELECT * FROM " + table.getName())) + .matches("VALUES (1, 10), (2, 10), (3, 20), (4, 20)"); + } + } + + @Test + void testEmptyManifest() + { + try (TestTable table = newTrinoTable("test_no_rewrite", "(x int)")) { + Set manifestFiles = manifestFiles(table.getName()); + assertThat(manifestFiles).isEmpty(); + + assertUpdate("ALTER TABLE " + table.getName() + " EXECUTE optimize_manifests"); + assertThat(manifestFiles(table.getName())).isEmpty(); + + assertQueryReturnsEmptyResult("SELECT * FROM " + table.getName()); + } + } + + @Test + void testNotRewriteSingleManifest() + { + try (TestTable table = newTrinoTable("test_no_rewrite", "(x int)")) { + assertUpdate("INSERT INTO " + table.getName() + " VALUES 1", 1); + + Set manifestFiles = manifestFiles(table.getName()); + assertThat(manifestFiles).hasSize(1); + + assertUpdate("ALTER TABLE " + table.getName() + " EXECUTE optimize_manifests"); + assertThat(manifestFiles(table.getName())) + .hasSize(1) + .isEqualTo(manifestFiles); + + assertThat(query("SELECT * FROM " + table.getName())) + .matches("VALUES 1"); + } + } + + @Test + void testUnsupportedWhere() + { + try (TestTable table = newTrinoTable("test_unsupported_where", "WITH (partitioning = ARRAY['part']) AS SELECT 1 id, 1 part")) { + 
assertQueryFails("ALTER TABLE " + table.getName() + " EXECUTE optimize_manifests WHERE id = 1", ".* WHERE not supported for procedure OPTIMIZE_MANIFESTS"); + assertQueryFails("ALTER TABLE " + table.getName() + " EXECUTE optimize_manifests WHERE part = 10", ".* WHERE not supported for procedure OPTIMIZE_MANIFESTS"); + } + } + + private Set manifestFiles(String tableName) + { + return computeActual("SELECT path FROM \"" + tableName + "$manifests\"").getOnlyColumnAsSet().stream() + .map(path -> (String) path) + .collect(toImmutableSet()); + } +} diff --git a/plugin/trino-ignite/pom.xml b/plugin/trino-ignite/pom.xml index 3285f0edaa74..93297f9e69fc 100644 --- a/plugin/trino-ignite/pom.xml +++ b/plugin/trino-ignite/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-jmx/pom.xml b/plugin/trino-jmx/pom.xml index b052b34c11ee..dac709141553 100644 --- a/plugin/trino-jmx/pom.xml +++ b/plugin/trino-jmx/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxRecordSetProvider.java b/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxRecordSetProvider.java index 9045d6ac98d4..3c3c0cf67b4d 100644 --- a/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxRecordSetProvider.java +++ b/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxRecordSetProvider.java @@ -29,6 +29,7 @@ import io.trino.spi.type.Type; import javax.management.Attribute; +import javax.management.InstanceNotFoundException; import javax.management.JMException; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -228,7 +229,12 @@ private List> getLiveRows(JmxTableHandle tableHandle, List> rows = ImmutableList.builder(); for (String objectName : tableHandle.objectNames()) { - rows.add(getLiveRow(objectName, columns, 0)); + try { + rows.add(getLiveRow(objectName, columns, 0)); + } + catch (InstanceNotFoundException _) { + // 
Ignore if the object doesn't exist. This might happen when it exists on the coordinator but has not yet been created on the worker. + } } return rows.build(); } diff --git a/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java b/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java index 9c1add71c322..09243b1afe91 100644 --- a/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java +++ b/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java @@ -27,7 +27,6 @@ import io.trino.spi.connector.ConnectorSplit; import io.trino.spi.connector.ConnectorSplitSource; import io.trino.spi.connector.ConnectorTransactionHandle; -import io.trino.spi.connector.Constraint; import io.trino.spi.connector.DynamicFilter; import io.trino.spi.connector.RecordCursor; import io.trino.spi.connector.RecordSet; @@ -43,6 +42,7 @@ import java.net.URI; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -51,6 +51,7 @@ import static io.airlift.slice.Slices.utf8Slice; import static io.trino.plugin.jmx.JmxMetadata.HISTORY_SCHEMA_NAME; import static io.trino.plugin.jmx.JmxMetadata.JMX_SCHEMA_NAME; +import static io.trino.spi.connector.Constraint.alwaysTrue; import static io.trino.spi.type.TimestampWithTimeZoneType.createTimestampWithTimeZoneType; import static io.trino.spi.type.VarcharType.createUnboundedVarcharType; import static io.trino.testing.TestingConnectorSession.SESSION; @@ -111,7 +112,7 @@ public void testPredicatePushdown() TupleDomain nodeTupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice(nodeIdentifier)))); JmxTableHandle tableHandle = new JmxTableHandle(new SchemaTableName("schema", "tableName"), ImmutableList.of("objectName"), ImmutableList.of(columnHandle), true, nodeTupleDomain); - ConnectorSplitSource 
splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, DynamicFilter.EMPTY, Constraint.alwaysTrue()); + ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, DynamicFilter.EMPTY, alwaysTrue()); List allSplits = getAllSplits(splitSource); assertThat(allSplits).hasSize(1); @@ -126,7 +127,7 @@ public void testNoPredicate() throws Exception { JmxTableHandle tableHandle = new JmxTableHandle(new SchemaTableName("schema", "tableName"), ImmutableList.of("objectName"), ImmutableList.of(columnHandle), true, TupleDomain.all()); - ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, DynamicFilter.EMPTY, Constraint.alwaysTrue()); + ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, DynamicFilter.EMPTY, alwaysTrue()); List allSplits = getAllSplits(splitSource); assertThat(allSplits).hasSize(nodes.size()); @@ -156,6 +157,45 @@ public void testRecordSetProvider() } } + @Test + public void testNonExistentObjectName() + throws Exception + { + JmxTableHandle jmxTableHandle = metadata.listTables(SESSION, Optional.of(JMX_SCHEMA_NAME)).stream() + .map(schemaTableName -> metadata.getTableHandle(SESSION, schemaTableName, Optional.empty(), Optional.empty())) + .filter(Objects::nonNull) + .filter(tableHandle -> !tableHandle.objectNames().isEmpty()) + .findFirst() + .orElseThrow(); + + ImmutableList objectNamesWithUnknowns = ImmutableList.builder() + .addAll(jmxTableHandle.objectNames()) + .add("JMImplementation:type=Unknown") + .build(); + JmxTableHandle tableHandleWithUnknownObject = new JmxTableHandle( + jmxTableHandle.tableName(), + objectNamesWithUnknowns, + jmxTableHandle.columnHandles(), + jmxTableHandle.liveData(), + jmxTableHandle.nodeFilter()); + + List columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandleWithUnknownObject).values()); + 
ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandleWithUnknownObject, DynamicFilter.EMPTY, alwaysTrue()); + List allSplits = getAllSplits(splitSource); + ConnectorSplit split = allSplits.getFirst(); + + RecordSet recordSet = recordSetProvider.getRecordSet(JmxTransactionHandle.INSTANCE, SESSION, split, tableHandleWithUnknownObject, columnHandles); + + int count = 0; + try (RecordCursor cursor = recordSet.cursor()) { + while (cursor.advanceNextPosition()) { + count++; + } + } + + assertThat(count).isEqualTo(objectNamesWithUnknowns.size() - 1); + } + @Test public void testHistoryRecordSetProvider() throws Exception @@ -202,7 +242,7 @@ private RecordSet getRecordSet(SchemaTableName schemaTableName) JmxTableHandle tableHandle = metadata.getTableHandle(SESSION, schemaTableName, Optional.empty(), Optional.empty()); List columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values()); - ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, DynamicFilter.EMPTY, Constraint.alwaysTrue()); + ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, DynamicFilter.EMPTY, alwaysTrue()); List allSplits = getAllSplits(splitSource); assertThat(allSplits).hasSize(nodes.size()); ConnectorSplit split = allSplits.get(0); diff --git a/plugin/trino-kafka-event-listener/pom.xml b/plugin/trino-kafka-event-listener/pom.xml index 3e5da3242179..a1300cf52756 100644 --- a/plugin/trino-kafka-event-listener/pom.xml +++ b/plugin/trino-kafka-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-kafka/pom.xml b/plugin/trino-kafka/pom.xml index cbbe43f039ab..9d258c46e5ff 100644 --- a/plugin/trino-kafka/pom.xml +++ b/plugin/trino-kafka/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/plugin/trino-kudu/pom.xml b/plugin/trino-kudu/pom.xml index be56e44969c6..f01006c3b3ea 100644 --- a/plugin/trino-kudu/pom.xml +++ b/plugin/trino-kudu/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-loki/pom.xml b/plugin/trino-loki/pom.xml index c88ce05e8a60..c9df9017cecf 100644 --- a/plugin/trino-loki/pom.xml +++ b/plugin/trino-loki/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -56,7 +56,7 @@ io.github.jeschkies loki-client - 0.0.2 + 0.0.3 diff --git a/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiRecordSet.java b/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiRecordSet.java index abc730c02ec8..03c979a1bd27 100644 --- a/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiRecordSet.java +++ b/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiRecordSet.java @@ -49,7 +49,7 @@ public LokiRecordSet(LokiClient lokiClient, LokiSplit split, List splits = ImmutableList.of(new LokiSplit(table.query(), table.start(), table.end())); + List splits = ImmutableList.of(new LokiSplit(table.query(), table.start(), table.end(), table.step())); log.debug("created %d splits", splits.size()); return new FixedSplitSource(splits); diff --git a/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiTableHandle.java b/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiTableHandle.java index d752f9a81813..f2202b3f0ff6 100644 --- a/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiTableHandle.java +++ b/plugin/trino-loki/src/main/java/io/trino/plugin/loki/LokiTableHandle.java @@ -22,7 +22,7 @@ import static java.util.Objects.requireNonNull; -public record LokiTableHandle(String query, Instant start, Instant end, List columnHandles) +public record LokiTableHandle(String query, Instant start, Instant end, int step, List columnHandles) implements ConnectorTableHandle { public LokiTableHandle diff --git 
a/plugin/trino-loki/src/main/java/io/trino/plugin/loki/QueryRangeTableFunction.java b/plugin/trino-loki/src/main/java/io/trino/plugin/loki/QueryRangeTableFunction.java index 6d0766d7743f..ac54e8653bd4 100644 --- a/plugin/trino-loki/src/main/java/io/trino/plugin/loki/QueryRangeTableFunction.java +++ b/plugin/trino-loki/src/main/java/io/trino/plugin/loki/QueryRangeTableFunction.java @@ -41,6 +41,7 @@ import static com.google.common.collect.ImmutableList.toImmutableList; import static io.trino.spi.StandardErrorCode.INVALID_FUNCTION_ARGUMENT; import static io.trino.spi.function.table.ReturnTypeSpecification.GenericTable.GENERIC_TABLE; +import static io.trino.spi.type.IntegerType.INTEGER; import static io.trino.spi.type.TimestampWithTimeZoneType.TIMESTAMP_TZ_NANOS; import static io.trino.spi.type.Timestamps.MILLISECONDS_PER_SECOND; import static io.trino.spi.type.Timestamps.NANOSECONDS_PER_MILLISECOND; @@ -74,6 +75,11 @@ public QueryRangeTableFunction(LokiMetadata metadata) ScalarArgumentSpecification.builder() .name("END") .type(TIMESTAMP_TZ_NANOS) + .build(), + ScalarArgumentSpecification.builder() + .name("STEP") + .type(INTEGER) + .defaultValue(0L) .build()), GENERIC_TABLE); @@ -89,6 +95,11 @@ public TableFunctionAnalysis analyze(ConnectorSession session, ConnectorTransact LongTimestampWithTimeZone startArgument = (LongTimestampWithTimeZone) ((ScalarArgument) arguments.get("START")).getValue(); LongTimestampWithTimeZone endArgument = (LongTimestampWithTimeZone) ((ScalarArgument) arguments.get("END")).getValue(); + Long step = (Long) ((ScalarArgument) arguments.get("STEP")).getValue(); + if (step == null || step < 0L) { + throw new TrinoException(INVALID_FUNCTION_ARGUMENT, "step must be positive"); + } + if (Strings.isNullOrEmpty(query)) { throw new TrinoException(INVALID_FUNCTION_ARGUMENT, query); } @@ -118,6 +129,7 @@ public TableFunctionAnalysis analyze(ConnectorSession session, ConnectorTransact query, start, end, + step.intValue(), columnHandles); return 
TableFunctionAnalysis.builder() diff --git a/plugin/trino-loki/src/test/java/io/trino/plugin/loki/TestLokiIntegration.java b/plugin/trino-loki/src/test/java/io/trino/plugin/loki/TestLokiIntegration.java index 34f7a8ffa42b..d552286ac21b 100644 --- a/plugin/trino-loki/src/test/java/io/trino/plugin/loki/TestLokiIntegration.java +++ b/plugin/trino-loki/src/test/java/io/trino/plugin/loki/TestLokiIntegration.java @@ -21,13 +21,11 @@ import java.time.Duration; import java.time.Instant; -import java.time.LocalDate; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoUnit; -import static io.trino.type.DateTimes.MILLISECONDS_PER_DAY; import static java.lang.String.format; final class TestLokiIntegration @@ -37,6 +35,7 @@ final class TestLokiIntegration private static final DateTimeFormatter timestampFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSS'Z'").withZone(ZoneOffset.UTC); private static final DateTimeFormatter timestampFormatterAtEasternTime = DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss'-05:00'").withZone(ZoneId.of("US/Eastern")); + private static final DateTimeFormatter isoTimestampFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss.SSS'Z'").withZone(ZoneOffset.UTC); @Override protected QueryRunner createQueryRunner() @@ -155,7 +154,6 @@ void testLabelsComplex() void testSelectTimestampLogsQuery() throws Exception { - DateTimeFormatter isoTimestampFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss.SSS'Z'").withZone(ZoneOffset.UTC); Instant start = Instant.now().truncatedTo(ChronoUnit.DAYS).minus(Duration.ofHours(12)); Instant end = start.plus(Duration.ofHours(4)); Instant firstLineTimestamp = start.truncatedTo(ChronoUnit.MILLIS); @@ -184,24 +182,24 @@ void testSelectTimestampLogsQuery() void testTimestampMetricsQuery() throws Exception { - LocalDate baseLineDate = LocalDate.now(); - Instant start = Instant.ofEpochMilli(baseLineDate.toEpochDay() * 
MILLISECONDS_PER_DAY); - Instant end = start.plus(Duration.ofHours(4)); + Instant start = Instant.now().truncatedTo(ChronoUnit.HOURS).minus(Duration.ofHours(4)); + Instant end = start.plus(Duration.ofHours(3)); - this.client.pushLogLine("line 1", start.plus(Duration.ofHours(1)), ImmutableMap.of("test", "timestamp_metrics_query")); + this.client.pushLogLine("line 1", start.plus(Duration.ofMinutes(4)), ImmutableMap.of("test", "timestamp_metrics_query")); this.client.pushLogLine("line 2", start.plus(Duration.ofHours(2)), ImmutableMap.of("test", "timestamp_metrics_query")); this.client.pushLogLine("line 3", start.plus(Duration.ofHours(3)), ImmutableMap.of("test", "timestamp_metrics_query")); this.client.flush(); assertQuery(format(""" - SELECT CAST(timestamp AS DATE) FROM + SELECT to_iso8601(timestamp), value FROM TABLE(system.query_range( 'count_over_time({test="timestamp_metrics_query"}[5m])', TIMESTAMP '%s', - TIMESTAMP '%s' + TIMESTAMP '%s', + 300 )) LIMIT 1 """, timestampFormatter.format(start), timestampFormatter.format(end)), - "VALUES DATE '%s'".formatted(baseLineDate)); + "VALUES ('%s', 1.0)".formatted(isoTimestampFormatter.format(start.plus(Duration.ofMinutes(5))))); } @Test @@ -209,4 +207,33 @@ void testSelectFromTableFails() { assertQueryFails("SELECT * FROM default", "Loki connector does not support querying tables directly. 
Use the TABLE function instead."); } + + @Test + void testQueryRangeInvalidArguments() + { + assertQueryFails( + """ + SELECT to_iso8601(timestamp), value FROM + TABLE(system.query_range( + 'count_over_time({test="timestamp_metrics_query"}[5m])', + TIMESTAMP '2012-08-08', + TIMESTAMP '2012-08-09', + -300 + )) + LIMIT 1 + """, + "step must be positive"); + assertQueryFails( + """ + SELECT to_iso8601(timestamp), value FROM + TABLE(system.query_range( + 'count_over_time({test="timestamp_metrics_query"}[5m])', + TIMESTAMP '2012-08-08', + TIMESTAMP '2012-08-09', + NULL + )) + LIMIT 1 + """, + "step must be positive"); + } } diff --git a/plugin/trino-mariadb/pom.xml b/plugin/trino-mariadb/pom.xml index ce2e8b77f257..1d040776e57c 100644 --- a/plugin/trino-mariadb/pom.xml +++ b/plugin/trino-mariadb/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-memory/pom.xml b/plugin/trino-memory/pom.xml index e6a4f4023e28..66ceb8b17f47 100644 --- a/plugin/trino-memory/pom.xml +++ b/plugin/trino-memory/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-ml/pom.xml b/plugin/trino-ml/pom.xml index 57275996e84a..076fae7a71fb 100644 --- a/plugin/trino-ml/pom.xml +++ b/plugin/trino-ml/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-mongodb/pom.xml b/plugin/trino-mongodb/pom.xml index 9e31177b0b36..dc9f93dfe41b 100644 --- a/plugin/trino-mongodb/pom.xml +++ b/plugin/trino-mongodb/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-mysql-event-listener/pom.xml b/plugin/trino-mysql-event-listener/pom.xml index 4fc69cb43413..c1089289e13c 100644 --- a/plugin/trino-mysql-event-listener/pom.xml +++ b/plugin/trino-mysql-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/plugin/trino-mysql/pom.xml b/plugin/trino-mysql/pom.xml index 0ff45ea18479..d94504d2aea8 100644 --- a/plugin/trino-mysql/pom.xml +++ b/plugin/trino-mysql/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-opa/pom.xml b/plugin/trino-opa/pom.xml index df9c4280856f..8308036fdfdf 100644 --- a/plugin/trino-opa/pom.xml +++ b/plugin/trino-opa/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-openlineage/pom.xml b/plugin/trino-openlineage/pom.xml index 9e74bb39c7bb..190161cf4bcd 100644 --- a/plugin/trino-openlineage/pom.xml +++ b/plugin/trino-openlineage/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -61,7 +61,7 @@ io.openlineage openlineage-java - 1.27.0 + 1.28.0 diff --git a/plugin/trino-openlineage/src/main/java/io/trino/plugin/openlineage/transport/http/OpenLineageHttpTransport.java b/plugin/trino-openlineage/src/main/java/io/trino/plugin/openlineage/transport/http/OpenLineageHttpTransport.java index 151790a20a39..6851e3560d68 100644 --- a/plugin/trino-openlineage/src/main/java/io/trino/plugin/openlineage/transport/http/OpenLineageHttpTransport.java +++ b/plugin/trino-openlineage/src/main/java/io/trino/plugin/openlineage/transport/http/OpenLineageHttpTransport.java @@ -15,6 +15,7 @@ import com.google.inject.Inject; import io.openlineage.client.transports.HttpConfig; +import io.openlineage.client.transports.HttpSslContextConfig; import io.openlineage.client.transports.HttpTransport; import io.openlineage.client.transports.TokenProvider; import io.trino.plugin.openlineage.transport.OpenLineageTransport; @@ -57,7 +58,8 @@ public HttpTransport buildTransport() this.tokenProvider, this.urlParams, this.headers, - null)); + null, + new HttpSslContextConfig())); } private static TokenProvider createTokenProvider(String token) diff --git a/plugin/trino-opensearch/pom.xml b/plugin/trino-opensearch/pom.xml 
index 2cc11dcdd2d3..d1b68bb4d33a 100644 --- a/plugin/trino-opensearch/pom.xml +++ b/plugin/trino-opensearch/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-oracle/pom.xml b/plugin/trino-oracle/pom.xml index c6d34c86ed6b..e8c6a8ff0e5a 100644 --- a/plugin/trino-oracle/pom.xml +++ b/plugin/trino-oracle/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-password-authenticators/pom.xml b/plugin/trino-password-authenticators/pom.xml index 1ba9bda10072..11e83ef51a93 100644 --- a/plugin/trino-password-authenticators/pom.xml +++ b/plugin/trino-password-authenticators/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-phoenix5/pom.xml b/plugin/trino-phoenix5/pom.xml index 1ea599888b20..8d15336db712 100644 --- a/plugin/trino-phoenix5/pom.xml +++ b/plugin/trino-phoenix5/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-pinot/pom.xml b/plugin/trino-pinot/pom.xml index ec79efe57775..18766206ee8b 100755 --- a/plugin/trino-pinot/pom.xml +++ b/plugin/trino-pinot/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-postgresql/pom.xml b/plugin/trino-postgresql/pom.xml index 7112b17f67ca..638c02bdec75 100644 --- a/plugin/trino-postgresql/pom.xml +++ b/plugin/trino-postgresql/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-prometheus/pom.xml b/plugin/trino-prometheus/pom.xml index e13f2b1fe8e5..fe8eb0588812 100644 --- a/plugin/trino-prometheus/pom.xml +++ b/plugin/trino-prometheus/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-ranger/pom.xml b/plugin/trino-ranger/pom.xml index 77e2bde116c6..48f35bf8c372 100644 --- a/plugin/trino-ranger/pom.xml 
+++ b/plugin/trino-ranger/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-redis/pom.xml b/plugin/trino-redis/pom.xml index 4524fb5f3bcf..384604f7f5f0 100644 --- a/plugin/trino-redis/pom.xml +++ b/plugin/trino-redis/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -81,7 +81,7 @@ org.apache.commons commons-pool2 - 2.12.0 + 2.12.1 diff --git a/plugin/trino-redshift/pom.xml b/plugin/trino-redshift/pom.xml index e823460bb878..3555935aa700 100644 --- a/plugin/trino-redshift/pom.xml +++ b/plugin/trino-redshift/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-resource-group-managers/pom.xml b/plugin/trino-resource-group-managers/pom.xml index 72153ab85009..21acb560dab3 100644 --- a/plugin/trino-resource-group-managers/pom.xml +++ b/plugin/trino-resource-group-managers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-session-property-managers/pom.xml b/plugin/trino-session-property-managers/pom.xml index 3717b7dce73a..3c4eb7300089 100644 --- a/plugin/trino-session-property-managers/pom.xml +++ b/plugin/trino-session-property-managers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-singlestore/pom.xml b/plugin/trino-singlestore/pom.xml index 77677b989891..ba668677547d 100644 --- a/plugin/trino-singlestore/pom.xml +++ b/plugin/trino-singlestore/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-snowflake/pom.xml b/plugin/trino-snowflake/pom.xml index c31e01eeb3af..b2cbe794fa26 100644 --- a/plugin/trino-snowflake/pom.xml +++ b/plugin/trino-snowflake/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -15,6 +15,7 @@ true + ${air.test.jvm.additional-arguments.default} 
--add-opens=java.base/java.nio=ALL-UNNAMED @@ -208,9 +209,6 @@ false - - ${air.test.jvm.additional-arguments.default} --add-opens=java.base/java.nio=ALL-UNNAMED - diff --git a/plugin/trino-snowflake/src/main/java/io/trino/plugin/snowflake/SnowflakeClientModule.java b/plugin/trino-snowflake/src/main/java/io/trino/plugin/snowflake/SnowflakeClientModule.java index 0e511587d5ec..72b10a63ad2e 100644 --- a/plugin/trino-snowflake/src/main/java/io/trino/plugin/snowflake/SnowflakeClientModule.java +++ b/plugin/trino-snowflake/src/main/java/io/trino/plugin/snowflake/SnowflakeClientModule.java @@ -13,6 +13,7 @@ */ package io.trino.plugin.snowflake; +import com.google.common.collect.ImmutableMultimap; import com.google.inject.Binder; import com.google.inject.Module; import com.google.inject.Provides; @@ -38,6 +39,7 @@ import static com.google.inject.multibindings.Multibinder.newSetBinder; import static io.airlift.configuration.ConfigBinder.configBinder; +import static io.trino.plugin.base.JdkCompatibilityChecks.verifyConnectorAccessOpened; import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; public class SnowflakeClientModule @@ -46,6 +48,11 @@ public class SnowflakeClientModule @Override public void configure(Binder binder) { + // Check reflective access allowed - required by Apache Arrow usage in Snowflake JDBC driver + verifyConnectorAccessOpened( + binder, + "snowflake", + ImmutableMultimap.of("java.base", "java.nio")); binder.bind(JdbcClient.class).annotatedWith(ForBaseJdbc.class).to(SnowflakeClient.class).in(Scopes.SINGLETON); configBinder(binder).bindConfig(SnowflakeConfig.class); configBinder(binder).bindConfig(TypeHandlingJdbcConfig.class); diff --git a/plugin/trino-spooling-filesystem/pom.xml b/plugin/trino-spooling-filesystem/pom.xml index 63ddd865d620..31088ca0e43a 100644 --- a/plugin/trino-spooling-filesystem/pom.xml +++ b/plugin/trino-spooling-filesystem/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/plugin/trino-sqlserver/pom.xml b/plugin/trino-sqlserver/pom.xml index 8a47d2736ce0..6c515c5b30e1 100644 --- a/plugin/trino-sqlserver/pom.xml +++ b/plugin/trino-sqlserver/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-teradata-functions/pom.xml b/plugin/trino-teradata-functions/pom.xml index 0ce75d4e21dc..11020fd7b782 100644 --- a/plugin/trino-teradata-functions/pom.xml +++ b/plugin/trino-teradata-functions/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-thrift-api/pom.xml b/plugin/trino-thrift-api/pom.xml index 5a2a1f543284..69cf373e0eb4 100644 --- a/plugin/trino-thrift-api/pom.xml +++ b/plugin/trino-thrift-api/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-thrift-testing-server/pom.xml b/plugin/trino-thrift-testing-server/pom.xml index 79752c502d23..12918d78b29b 100644 --- a/plugin/trino-thrift-testing-server/pom.xml +++ b/plugin/trino-thrift-testing-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-thrift/pom.xml b/plugin/trino-thrift/pom.xml index e6648963e834..8b660f7e10c2 100644 --- a/plugin/trino-thrift/pom.xml +++ b/plugin/trino-thrift/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-tpcds/pom.xml b/plugin/trino-tpcds/pom.xml index dc223788c867..4dfddced9d3f 100644 --- a/plugin/trino-tpcds/pom.xml +++ b/plugin/trino-tpcds/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-tpch/pom.xml b/plugin/trino-tpch/pom.xml index a05f8a85089d..9e07afc684a4 100644 --- a/plugin/trino-tpch/pom.xml +++ b/plugin/trino-tpch/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-vertica/pom.xml b/plugin/trino-vertica/pom.xml 
index eb16a8883d95..95412867f633 100644 --- a/plugin/trino-vertica/pom.xml +++ b/plugin/trino-vertica/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/pom.xml b/pom.xml index 76a20126fd19..9a418a17b548 100644 --- a/pom.xml +++ b/pom.xml @@ -10,7 +10,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT pom ${project.artifactId} @@ -149,7 +149,7 @@ 23 - 2025-01-27T23:28:18Z + 2025-02-06T01:52:24Z ERROR @@ -182,7 +182,7 @@ ${air.test.jvm.additional-arguments.default} - 300 + 302 2.9.6 4.13.2 1.12.0 @@ -191,7 +191,7 @@ 7.7.1 108 1.22 - 11.3.0 + 11.3.1 1.15.1 v22.13.1 11.1.0 @@ -200,7 +200,7 @@ 5.3.3 1.7.1 5.16.0 - 2.13.0 + 2.13.1 0.12.6 1.20.0 3.9.0 @@ -237,7 +237,7 @@ com.google.cloud libraries-bom - 26.53.0 + 26.54.0 pom import @@ -309,7 +309,7 @@ software.amazon.awssdk bom - 2.30.10 + 2.30.15 pom import @@ -486,7 +486,7 @@ com.google.code.gson gson - 2.12.0 + 2.12.1 @@ -564,7 +564,7 @@ com.nimbusds oauth2-oidc-sdk - 11.21.2 + 11.22 jdk11 @@ -816,7 +816,7 @@ io.minio minio - 8.5.15 + 8.5.17 @@ -903,7 +903,7 @@ io.projectreactor.netty reactor-netty-core - 1.1.21 + 1.1.26 @@ -1896,7 +1896,7 @@ org.apache.httpcomponents.client5 httpclient5 - 5.4.1 + 5.4.2 @@ -1923,6 +1923,12 @@ + + org.apache.iceberg + iceberg-aws + ${dep.iceberg.version} + + org.apache.iceberg iceberg-core @@ -2166,7 +2172,7 @@ org.checkerframework checker-qual - 3.48.4 + 3.49.0 diff --git a/service/trino-proxy/pom.xml b/service/trino-proxy/pom.xml index 0e8da1622d88..4ec78d03ada9 100644 --- a/service/trino-proxy/pom.xml +++ b/service/trino-proxy/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/service/trino-verifier/pom.xml b/service/trino-verifier/pom.xml index 8935c7781957..06b4e22e6b83 100644 --- a/service/trino-verifier/pom.xml +++ b/service/trino-verifier/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/service/trino-verifier/src/main/java/io/trino/verifier/QueryRewriter.java b/service/trino-verifier/src/main/java/io/trino/verifier/QueryRewriter.java index 327b80298f5f..a616a5b027ec 100644 --- a/service/trino-verifier/src/main/java/io/trino/verifier/QueryRewriter.java +++ b/service/trino-verifier/src/main/java/io/trino/verifier/QueryRewriter.java @@ -206,10 +206,10 @@ private List getColumns(Connection connection, CreateTableAsSelect creat querySpecification.getOffset(), Optional.of(new Limit(new LongLiteral("0")))); - zeroRowsQuery = new io.trino.sql.tree.Query(ImmutableList.of(), createSelectClause.getWith(), innerQuery, Optional.empty(), Optional.empty(), Optional.empty()); + zeroRowsQuery = new io.trino.sql.tree.Query(ImmutableList.of(), ImmutableList.of(), createSelectClause.getWith(), innerQuery, Optional.empty(), Optional.empty(), Optional.empty()); } else { - zeroRowsQuery = new io.trino.sql.tree.Query(ImmutableList.of(), createSelectClause.getWith(), innerQuery, Optional.empty(), Optional.empty(), Optional.of(new Limit(new LongLiteral("0")))); + zeroRowsQuery = new io.trino.sql.tree.Query(ImmutableList.of(), ImmutableList.of(), createSelectClause.getWith(), innerQuery, Optional.empty(), Optional.empty(), Optional.of(new Limit(new LongLiteral("0")))); } ImmutableList.Builder columns = ImmutableList.builder(); diff --git a/testing/trino-benchmark-queries/pom.xml b/testing/trino-benchmark-queries/pom.xml index 4ac19063fe16..c2c75b019cbf 100644 --- a/testing/trino-benchmark-queries/pom.xml +++ b/testing/trino-benchmark-queries/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-benchto-benchmarks/pom.xml b/testing/trino-benchto-benchmarks/pom.xml index 05d246a48cf2..21a6ec32b6b5 100644 --- a/testing/trino-benchto-benchmarks/pom.xml +++ b/testing/trino-benchto-benchmarks/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git 
a/testing/trino-faulttolerant-tests/pom.xml b/testing/trino-faulttolerant-tests/pom.xml index 28adc658e04d..0de1a31640eb 100644 --- a/testing/trino-faulttolerant-tests/pom.xml +++ b/testing/trino-faulttolerant-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-plugin-reader/pom.xml b/testing/trino-plugin-reader/pom.xml index 200682f76e02..2d8afd5e4de7 100644 --- a/testing/trino-plugin-reader/pom.xml +++ b/testing/trino-plugin-reader/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests-groups/pom.xml b/testing/trino-product-tests-groups/pom.xml index 3a8bd65e271c..1d1ff8f5f9aa 100644 --- a/testing/trino-product-tests-groups/pom.xml +++ b/testing/trino-product-tests-groups/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java b/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java index 11c13365cdb9..4c9057bd9b5a 100644 --- a/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java +++ b/testing/trino-product-tests-groups/src/main/java/io/trino/tests/product/TestGroups.java @@ -96,12 +96,11 @@ public final class TestGroups public static final String DELTA_LAKE_AZURE = "delta-lake-azure"; public static final String DELTA_LAKE_GCS = "delta-lake-gcs"; public static final String DELTA_LAKE_DATABRICKS = "delta-lake-databricks"; - public static final String DELTA_LAKE_DATABRICKS_113 = "delta-lake-databricks-113"; public static final String DELTA_LAKE_DATABRICKS_122 = "delta-lake-databricks-122"; public static final String DELTA_LAKE_DATABRICKS_133 = "delta-lake-databricks-133"; public static final String DELTA_LAKE_DATABRICKS_143 = "delta-lake-databricks-143"; public static final String DATABRICKS_UNITY_HTTP_HMS = 
"databricks-unity-http-hms"; - public static final String DELTA_LAKE_EXCLUDE_104 = "delta-lake-exclude-104"; + public static final String DELTA_LAKE_EXCLUDE_113 = "delta-lake-exclude-113"; public static final String DELTA_LAKE_ALLUXIO_CACHING = "delta-lake-alluxio-caching"; public static final String HUDI = "hudi"; public static final String PARQUET = "parquet"; diff --git a/testing/trino-product-tests-launcher/pom.xml b/testing/trino-product-tests-launcher/pom.xml index d409248cc9e1..b448200aff62 100644 --- a/testing/trino-product-tests-launcher/pom.xml +++ b/testing/trino-product-tests-launcher/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeDeltaLakeDatabricks104.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeDeltaLakeDatabricks104.java deleted file mode 100644 index cf204d8d18ab..000000000000 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeDeltaLakeDatabricks104.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.tests.product.launcher.env.environment; - -import com.google.inject.Inject; -import io.trino.tests.product.launcher.docker.DockerFiles; -import io.trino.tests.product.launcher.env.common.Standard; -import io.trino.tests.product.launcher.env.common.TestsEnvironment; - -import static io.trino.testing.SystemEnvironmentUtils.requireEnv; - -@TestsEnvironment -public class EnvSinglenodeDeltaLakeDatabricks104 - extends AbstractSinglenodeDeltaLakeDatabricks -{ - @Inject - public EnvSinglenodeDeltaLakeDatabricks104(Standard standard, DockerFiles dockerFiles) - { - super(standard, dockerFiles); - } - - @Override - String databricksTestJdbcUrl() - { - return requireEnv("DATABRICKS_104_JDBC_URL"); - } -} diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeSparkIcebergNessie.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeSparkIcebergNessie.java index d51819e8c96c..1bc328fd18ac 100644 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeSparkIcebergNessie.java +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/env/environment/EnvSinglenodeSparkIcebergNessie.java @@ -43,7 +43,7 @@ public class EnvSinglenodeSparkIcebergNessie private static final int SPARK_THRIFT_PORT = 10213; private static final int NESSIE_PORT = 19120; - private static final String NESSIE_VERSION = "0.102.2"; + private static final String NESSIE_VERSION = "0.102.5"; private static final String SPARK = "spark"; private final DockerFiles dockerFiles; diff --git a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks104.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks104.java deleted file mode 100644 
index decb318190d4..000000000000 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks104.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.tests.product.launcher.suite.suites; - -import com.google.common.collect.ImmutableList; -import io.trino.tests.product.launcher.env.EnvironmentConfig; -import io.trino.tests.product.launcher.env.environment.EnvSinglenodeDeltaLakeDatabricks104; -import io.trino.tests.product.launcher.suite.SuiteDeltaLakeDatabricks; -import io.trino.tests.product.launcher.suite.SuiteTestRun; - -import java.util.List; - -import static io.trino.tests.product.TestGroups.CONFIGURED_FEATURES; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; -import static io.trino.tests.product.launcher.suite.SuiteTestRun.testOnEnvironment; - -public class SuiteDeltaLakeDatabricks104 - extends SuiteDeltaLakeDatabricks -{ - @Override - public List getTestRuns(EnvironmentConfig config) - { - return ImmutableList.of( - testOnEnvironment(EnvSinglenodeDeltaLakeDatabricks104.class) - .withGroups(CONFIGURED_FEATURES, DELTA_LAKE_DATABRICKS) - .withExcludedGroups(DELTA_LAKE_EXCLUDE_104) - .withExcludedTests(getExcludedTests()) - .build()); - } -} diff --git 
a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks113.java b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks113.java index dc6c51530ea4..ae9f51cfb3ec 100644 --- a/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks113.java +++ b/testing/trino-product-tests-launcher/src/main/java/io/trino/tests/product/launcher/suite/suites/SuiteDeltaLakeDatabricks113.java @@ -22,7 +22,8 @@ import java.util.List; import static io.trino.tests.product.TestGroups.CONFIGURED_FEATURES; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; +import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; +import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_113; import static io.trino.tests.product.launcher.suite.SuiteTestRun.testOnEnvironment; public class SuiteDeltaLakeDatabricks113 @@ -33,7 +34,8 @@ public List getTestRuns(EnvironmentConfig config) { return ImmutableList.of( testOnEnvironment(EnvSinglenodeDeltaLakeDatabricks113.class) - .withGroups(CONFIGURED_FEATURES, DELTA_LAKE_DATABRICKS_113) + .withGroups(CONFIGURED_FEATURES, DELTA_LAKE_DATABRICKS) + .withExcludedGroups(DELTA_LAKE_EXCLUDE_113) .withExcludedTests(getExcludedTests()) .build()); } diff --git a/testing/trino-product-tests/pom.xml b/testing/trino-product-tests/pom.xml index b70445c927b3..bfa04d698751 100644 --- a/testing/trino-product-tests/pom.xml +++ b/testing/trino-product-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java index bc8d7edd1f24..3b90e0a1fd2c 100644 --- 
a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java @@ -25,11 +25,10 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; +import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_143_RUNTIME_VERSION; -import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_91_RUNTIME_VERSION; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_MATCH; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.dropDeltaTableWithRetry; @@ -244,7 +243,7 @@ public void testTrinoPreservesReaderAndWriterVersions() } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_104, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_113, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testTrinoPreservesTableFeature() { @@ -297,7 +296,7 @@ public void testTrinoAlterTablePreservesGeneratedColumn() onTrino().executeQuery("ALTER TABLE delta.default." + tableName + " ADD COLUMN c INT"); assertThat((String) onDelta().executeQuery("SHOW CREATE TABLE default." 
+ tableName).getOnlyValue()) - .contains((getDatabricksRuntimeVersion().orElseThrow().equals(DATABRICKS_91_RUNTIME_VERSION) ? "`b`" : "b") + " INT GENERATED ALWAYS AS ( a * 2 )"); + .contains("b INT GENERATED ALWAYS AS ( a * 2 )"); onDelta().executeQuery("INSERT INTO default." + tableName + " (a, c) VALUES (1, 3)"); assertThat(onTrino().executeQuery("SELECT * FROM delta.default." + tableName)) .containsOnly(row(1, 2, 3)); diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCaseInsensitiveMapping.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCaseInsensitiveMapping.java index 30971019b12a..679937044cde 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCaseInsensitiveMapping.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCaseInsensitiveMapping.java @@ -24,11 +24,8 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_143_RUNTIME_VERSION; @@ -75,7 +72,7 @@ public void testNonLowercaseColumnNames() } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, 
DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testNonLowercaseFieldNames() { @@ -190,8 +187,7 @@ public void testGeneratedColumnWithNonLowerCaseColumnName() } } - // Exclude 10.4 because it throws MISSING_COLUMN when executing INSERT statement - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_104, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testIdentityColumnWithNonLowerCaseColumnName() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeChangeDataFeedCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeChangeDataFeedCompatibility.java index fca2c054dbcf..74b210918459 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeChangeDataFeedCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeChangeDataFeedCompatibility.java @@ -31,11 +31,10 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; +import 
static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; @@ -99,7 +98,7 @@ public void testUpdateTableWithCdf(String columnMappingMode) } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_OSS, DELTA_LAKE_EXCLUDE_104, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_113, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testUpdateTableWithChangeDataFeedWriterFeature() { @@ -532,7 +531,7 @@ public void testMergeDeleteIntoTableWithCdfEnabled(String columnMappingMode) } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testMergeMixedDeleteAndUpdateIntoTableWithCdfEnabled() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCheckpointsCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCheckpointsCompatibility.java index 530a409e2402..8a50e0a77fa1 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCheckpointsCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCheckpointsCompatibility.java @@ -24,7 +24,6 @@ import io.trino.tempto.query.QueryResult; 
import io.trino.testng.services.Flaky; import io.trino.tests.product.deltalake.util.DatabricksVersion; -import org.testng.SkipException; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; @@ -38,7 +37,6 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; @@ -46,8 +44,7 @@ import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.TransactionLogAssertions.assertLastEntryIsCheckpointed; import static io.trino.tests.product.deltalake.TransactionLogAssertions.assertTransactionLogVersion; -import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_104_RUNTIME_VERSION; -import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_91_RUNTIME_VERSION; +import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_113_RUNTIME_VERSION; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_MATCH; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.dropDeltaTableWithRetry; @@ -56,7 +53,6 @@ import static io.trino.tests.product.utils.QueryExecutors.onTrino; import static java.lang.String.format; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; public class TestDeltaLakeCheckpointsCompatibility extends BaseTestDeltaLakeS3Storage @@ -278,7 
+274,7 @@ private void trinoUsesCheckpointInterval(String deltaTableProperties) } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testDatabricksUsesCheckpointInterval() { @@ -370,7 +366,7 @@ private void testCheckpointMinMaxStatisticsForRowType(Consumer sqlExecut // Assert min/max queries can be computed from just metadata String explainSelectMax = getOnlyElement(onDelta().executeQuery("EXPLAIN SELECT max(root.entry_one) FROM default." + tableName).column(1)); - String column = databricksRuntimeVersion.orElseThrow().isAtLeast(DATABRICKS_104_RUNTIME_VERSION) ? "root.entry_one" : "root.entry_one AS `entry_one`"; + String column = databricksRuntimeVersion.orElseThrow().isAtLeast(DATABRICKS_113_RUNTIME_VERSION) ? "root.entry_one" : "root.entry_one AS `entry_one`"; assertThat(explainSelectMax).matches("== Physical Plan ==\\s*LocalTableScan \\[max\\(" + column + "\\).*]\\s*"); // check both engines can read both tables @@ -436,7 +432,7 @@ private void testCheckpointNullStatisticsForRowType(Consumer sqlExecutor // Assert counting non null entries can be computed from just metadata String explainCountNotNull = getOnlyElement(onDelta().executeQuery("EXPLAIN SELECT count(root.entry_two) FROM default." + tableName).column(1)); - String column = databricksRuntimeVersion.orElseThrow().isAtLeast(DATABRICKS_104_RUNTIME_VERSION) ? "root.entry_two" : "root.entry_two AS `entry_two`"; + String column = databricksRuntimeVersion.orElseThrow().isAtLeast(DATABRICKS_113_RUNTIME_VERSION) ? 
"root.entry_two" : "root.entry_two AS `entry_two`"; assertThat(explainCountNotNull).matches("== Physical Plan ==\\s*LocalTableScan \\[count\\(" + column + "\\).*]\\s*"); // check both engines can read both tables @@ -562,11 +558,6 @@ private void testWriteStatsAsJsonEnabled(Consumer sqlExecutor, String ta " delta.checkpoint.writeStatsAsStruct = true)", tableName, type, bucketName); - if (databricksRuntimeVersion.isPresent() && databricksRuntimeVersion.get().equals(DATABRICKS_91_RUNTIME_VERSION) && type.equals("struct")) { - assertThatThrownBy(() -> onDelta().executeQuery(createTableSql)).hasStackTraceContaining("ParseException"); - throw new SkipException("New runtime version covers the type"); - } - onDelta().executeQuery(createTableSql); try { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCloneTableCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCloneTableCompatibility.java index d773c66a6547..b330ef9c4d99 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCloneTableCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCloneTableCompatibility.java @@ -28,7 +28,6 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; @@ -241,7 +240,7 @@ public void testReadFromSchemaChangedShallowCloneTable() testReadSchemaChangedCloneTable("SHALLOW", false); } - @Test(groups = {DELTA_LAKE_DATABRICKS, 
DELTA_LAKE_EXCLUDE_104, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testReadFromSchemaChangedDeepCloneTable() { @@ -380,6 +379,67 @@ private void testReadSchemaChangedCloneTable(String cloneType, boolean partition } } + @Test(groups = {DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + public void testReadShallowCloneTableWithSourceDeletionVector() + { + testReadShallowCloneTableWithSourceDeletionVector(true); + testReadShallowCloneTableWithSourceDeletionVector(false); + } + + private void testReadShallowCloneTableWithSourceDeletionVector(boolean partitioned) + { + String baseTable = "test_dv_base_table_" + randomNameSuffix(); + String clonedTable = "test_dv_clone_table_" + randomNameSuffix(); + String directoryName = "clone-deletion-vector-compatibility-test-"; + try { + onDelta().executeQuery("CREATE TABLE default." + baseTable + + " (a_int INT, b_string STRING) USING delta " + + (partitioned ? "PARTITIONED BY (b_string) " : "") + + "LOCATION 's3://" + bucketName + "/" + directoryName + baseTable + "'" + + "TBLPROPERTIES ('delta.enableDeletionVectors'='true')"); + + onDelta().executeQuery("INSERT INTO " + baseTable + " VALUES (1, 'aaa'), (2, 'aaa'), (3, 'bbb'), (4, 'bbb')"); + // enforce the rows into one file, so that later is partial delete of the data file instead of remove all rows. + // This allows the cloned table to reference the same deletion vector but different offset + // and help us to test the read process of 'p' type deletion vector better. + onDelta().executeQuery("OPTIMIZE " + baseTable); + onDelta().executeQuery("DELETE FROM default." + baseTable + " WHERE a_int IN (2, 3)"); + + onDelta().executeQuery("CREATE TABLE default." + clonedTable + + " SHALLOW CLONE default." 
+ baseTable + + " LOCATION 's3://" + bucketName + "/" + directoryName + clonedTable + "'"); + + List expectedRows = ImmutableList.of(row(1, "aaa"), row(4, "bbb")); + assertThat(onDelta().executeQuery("SELECT * FROM default." + baseTable)).containsOnly(expectedRows); + assertThat(onDelta().executeQuery("SELECT * FROM default." + clonedTable)).containsOnly(expectedRows); + assertThat(onTrino().executeQuery("SELECT * FROM delta.default." + baseTable)).containsOnly(expectedRows); + assertThat(onTrino().executeQuery("SELECT * FROM delta.default." + clonedTable)).containsOnly(expectedRows); + + assertThat(getDeletionVectorType(baseTable)).isNotEqualTo("p"); + assertThat(getDeletionVectorType(clonedTable)).isEqualTo("p"); + } + finally { + onDelta().executeQuery("DROP TABLE IF EXISTS default." + baseTable); + onDelta().executeQuery("DROP TABLE IF EXISTS default." + clonedTable); + } + } + + private static String getDeletionVectorType(String tableName) + { + return (String) onTrino().executeQuery( + """ + SELECT json_extract_scalar(elem, '$.add.deletionVector.storageType') AS storage_type + FROM ( + SELECT CAST(transaction AS JSON) AS json_arr + FROM default."%s$transactions" + ORDER BY version + ) t, UNNEST(CAST(t.json_arr AS ARRAY(JSON))) AS u(elem) + WHERE json_extract_scalar(elem, '$.add.deletionVector.storageType') IS NOT NULL + LIMIT 1 + """.formatted(tableName)) + .getOnlyValue(); + } + private List getActiveDataFiles(String tableName) { return onTrino().executeQuery("SELECT DISTINCT \"$path\" FROM default." 
+ tableName).column(1); diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeColumnMappingMode.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeColumnMappingMode.java index 9037f8c833a9..aa284125c55d 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeColumnMappingMode.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeColumnMappingMode.java @@ -27,7 +27,6 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; @@ -53,7 +52,7 @@ public class TestDeltaLakeColumnMappingMode extends BaseTestDeltaLakeS3Storage { - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testColumnMappingModeNone() { @@ -226,7 +225,7 @@ private void testColumnMappingModeReaderAndWriterVersion(Consumer create onTrino().executeQuery("DROP TABLE delta.default." 
+ tableName); } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingDataProvider") + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingDataProvider") @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testTrinoColumnMappingMode(String mode) { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCreateTableAsSelectCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCreateTableAsSelectCompatibility.java index a6845c3492f5..230c614c721d 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCreateTableAsSelectCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeCreateTableAsSelectCompatibility.java @@ -25,7 +25,6 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; @@ -43,7 +42,7 @@ public class TestDeltaLakeCreateTableAsSelectCompatibility extends BaseTestDeltaLakeS3Storage { - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, PROFILE_SPECIFIC_TESTS}) + 
@Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testTrinoTypesWithDatabricks() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDatabricksCreateTableCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDatabricksCreateTableCompatibility.java index 09dad521a2d9..07b8f3abdd89 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDatabricksCreateTableCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDatabricksCreateTableCompatibility.java @@ -26,7 +26,6 @@ import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; -import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_104_RUNTIME_VERSION; import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_113_RUNTIME_VERSION; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_MATCH; @@ -69,7 +68,7 @@ public void testDatabricksCanReadInitialCreateTable() assertThat(onDelta().executeQuery("SHOW TABLES FROM default LIKE '" + tableName + "'")).contains(row("default", tableName, false)); assertThat(onDelta().executeQuery("SELECT count(*) FROM default." 
+ tableName)).contains(row(0)); String showCreateTable; - if (databricksRuntimeVersion.isAtLeast(DATABRICKS_104_RUNTIME_VERSION)) { + if (databricksRuntimeVersion.isAtLeast(DATABRICKS_113_RUNTIME_VERSION)) { showCreateTable = format( "CREATE TABLE spark_catalog.default.%s (\n integer INT,\n string STRING,\n timetz TIMESTAMP)\nUSING delta\nLOCATION 's3://%s/%s'\n%s", tableName, @@ -112,7 +111,7 @@ public void testDatabricksCanReadInitialCreatePartitionedTable() assertThat(onDelta().executeQuery("SHOW TABLES LIKE '" + tableName + "'")).contains(row("default", tableName, false)); assertThat(onDelta().executeQuery("SELECT count(*) FROM " + tableName)).contains(row(0)); String showCreateTable; - if (databricksRuntimeVersion.isAtLeast(DATABRICKS_104_RUNTIME_VERSION)) { + if (databricksRuntimeVersion.isAtLeast(DATABRICKS_113_RUNTIME_VERSION)) { showCreateTable = format( "CREATE TABLE spark_catalog.default.%s (\n integer INT,\n string STRING,\n timetz TIMESTAMP)\nUSING delta\n" + "PARTITIONED BY (string)\nLOCATION 's3://%s/%s'\n%s", @@ -155,7 +154,7 @@ public void testDatabricksCanReadInitialCreateTableAs() assertThat(onDelta().executeQuery("SHOW TABLES FROM default LIKE '" + tableName + "'")).contains(row("default", tableName, false)); assertThat(onDelta().executeQuery("SELECT count(*) FROM default." 
+ tableName)).contains(row(3)); String showCreateTable; - if (databricksRuntimeVersion.isAtLeast(DATABRICKS_104_RUNTIME_VERSION)) { + if (databricksRuntimeVersion.isAtLeast(DATABRICKS_113_RUNTIME_VERSION)) { showCreateTable = format( "CREATE TABLE spark_catalog.default.%s (\n integer INT,\n string STRING,\n timetz TIMESTAMP)\nUSING delta\nLOCATION 's3://%s/%s'\n%s", tableName, @@ -201,7 +200,7 @@ public void testDatabricksCanReadInitialCreatePartitionedTableAs() assertThat(onDelta().executeQuery("SHOW TABLES LIKE '" + tableName + "'")).contains(row("default", tableName, false)); assertThat(onDelta().executeQuery("SELECT count(*) FROM " + tableName)).contains(row(3)); String showCreateTable; - if (databricksRuntimeVersion.isAtLeast(DATABRICKS_104_RUNTIME_VERSION)) { + if (databricksRuntimeVersion.isAtLeast(DATABRICKS_113_RUNTIME_VERSION)) { showCreateTable = format( "CREATE TABLE spark_catalog.default.%s (\n integer INT,\n string STRING,\n timetz TIMESTAMP)\nUSING delta\n" + "PARTITIONED BY (string)\nLOCATION 's3://%s/%s'\n%s", @@ -401,13 +400,6 @@ public void testCreateTableWithAllPartitionColumns() private String getDatabricksDefaultTableProperties() { - if (databricksRuntimeVersion.equals(DATABRICKS_104_RUNTIME_VERSION)) { - return "TBLPROPERTIES (\n" + - " 'Type' = 'EXTERNAL',\n" + - " 'delta.enableDeletionVectors' = 'false',\n" + - " 'delta.minReaderVersion' = '1',\n" + - " 'delta.minWriterVersion' = '2')\n"; - } if (databricksRuntimeVersion.isAtLeast(DATABRICKS_113_RUNTIME_VERSION)) { return "TBLPROPERTIES (\n" + " 'delta.enableDeletionVectors' = 'false',\n" + diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDeleteCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDeleteCompatibility.java index ef24c834f3a8..23ad38b572aa 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDeleteCompatibility.java +++ 
b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeDeleteCompatibility.java @@ -31,7 +31,7 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; +import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; @@ -208,7 +208,7 @@ public void testTrinoDeletionVectors() } // Databricks 12.1 and OSS Delta 2.4.0 added support for deletion vectors - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_OSS, DELTA_LAKE_EXCLUDE_104, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingModeDataProvider") + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_113, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingModeDataProvider") @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testDeletionVectors(String mode) { @@ -495,7 +495,7 @@ public void testDeletionVectorsAcrossAddFile(boolean partitioned) } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_OSS, DELTA_LAKE_EXCLUDE_104, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_113, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testDeletionVectorsTruncateTable() { @@ -595,8 +595,7 @@ public void testDeletionVectorsAbsolutePath() "LOCATION 's3://" + bucketName + "/databricks-compatibility-test-clone-" + baseTableName + "'"); assertThat(onDelta().executeQuery("SELECT * FROM 
default." + tableName)).contains(expected); - assertQueryFailure(() -> onTrino().executeQuery("SELECT * FROM delta.default." + tableName)) - .hasMessageContaining("Unsupported storage type for deletion vector: p"); + assertThat(onTrino().executeQuery("SELECT * FROM delta.default." + tableName)).contains(expected); } finally { dropDeltaTableWithRetry("default." + baseTableName); diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeIdentityColumnCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeIdentityColumnCompatibility.java index a0cf50cd03e4..eb7d60a1d6af 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeIdentityColumnCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeIdentityColumnCompatibility.java @@ -21,7 +21,7 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; +import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_MATCH; @@ -37,7 +37,7 @@ public class TestDeltaLakeIdentityColumnCompatibility extends BaseTestDeltaLakeS3Storage { - @Test(groups = {DELTA_LAKE_DATABRICKS_113, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testIdentityColumn() { @@ -77,7 +77,7 @@ 
public void testIdentityColumn() } } - @Test(groups = {DELTA_LAKE_DATABRICKS_113, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingDataProvider") + @Test(groups = {DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingDataProvider") @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testRenameIdentityColumn(String mode) { @@ -114,7 +114,7 @@ public void testRenameIdentityColumn(String mode) } } - @Test(groups = {DELTA_LAKE_DATABRICKS_113, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingDataProvider") + @Test(groups = {DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}, dataProvider = "columnMappingDataProvider") @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testDropIdentityColumn(String mode) { @@ -152,7 +152,7 @@ public void testDropIdentityColumn(String mode) } } - @Test(groups = {DELTA_LAKE_DATABRICKS_113, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testVacuumProcedureWithIdentityColumn() { @@ -183,7 +183,7 @@ public void testVacuumProcedureWithIdentityColumn() } } - @Test(groups = {DELTA_LAKE_DATABRICKS_113, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testIdentityColumnCheckpointInterval() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeInsertCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeInsertCompatibility.java index b7290e0839b0..d080a1f6f848 100644 --- 
a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeInsertCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeInsertCompatibility.java @@ -31,13 +31,12 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; -import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_104_RUNTIME_VERSION; +import static io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_113_RUNTIME_VERSION; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_MATCH; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.dropDeltaTableWithRetry; @@ -58,7 +57,7 @@ public void setup() databricksRuntimeVersion = getDatabricksRuntimeVersion(); } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void 
testInsertCompatibility() { @@ -94,7 +93,7 @@ public void testInsertCompatibility() } } - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testPartitionedInsertCompatibility() { @@ -414,7 +413,7 @@ public void testCompression(String compressionCodec) assertThat(onTrino().executeQuery("SELECT * FROM " + trinoTableName)) .containsOnly(expected); - if ("ZSTD".equals(compressionCodec) && databricksRuntimeVersion.orElseThrow().isOlderThan(DATABRICKS_104_RUNTIME_VERSION)) { + if ("ZSTD".equals(compressionCodec) && databricksRuntimeVersion.orElseThrow().isOlderThan(DATABRICKS_113_RUNTIME_VERSION)) { assertQueryFailure(() -> onDelta().executeQuery("SELECT * FROM default." 
+ tableName)) .hasMessageContaining("java.lang.ClassNotFoundException: org.apache.hadoop.io.compress.ZStandardCodec"); } diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSelectCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSelectCompatibility.java index 3a5a65f57132..e8e7ac5e03d0 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSelectCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSelectCompatibility.java @@ -24,7 +24,6 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_133; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_143; @@ -41,7 +40,7 @@ public class TestDeltaLakeSelectCompatibility extends BaseTestDeltaLakeS3Storage { - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, DELTA_LAKE_DATABRICKS_133, DELTA_LAKE_DATABRICKS_143, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testPartitionedSelectSpecialCharacters() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSystemTableCompatibility.java 
b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSystemTableCompatibility.java index 842737d9ed3c..6f7abf4ea8ab 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSystemTableCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeSystemTableCompatibility.java @@ -24,7 +24,6 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_EXCLUDE_104; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; @@ -38,7 +37,7 @@ public class TestDeltaLakeSystemTableCompatibility extends BaseTestDeltaLakeS3Storage { - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_EXCLUDE_104, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testTablePropertiesCaseSensitivity() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeUpdateCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeUpdateCompatibility.java index 918507de550e..e7a7b48f69a5 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeUpdateCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeUpdateCompatibility.java @@ -24,7 +24,6 @@ import static io.trino.tempto.assertions.QueryAssert.Row.row; import 
static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.deltalake.util.DeltaLakeTestUtils.DATABRICKS_COMMUNICATION_FAILURE_ISSUE; @@ -38,7 +37,7 @@ public class TestDeltaLakeUpdateCompatibility extends BaseTestDeltaLakeS3Storage { - @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_113, DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testUpdatesFromDatabricks() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeWriteDatabricksCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeWriteDatabricksCompatibility.java index 6e08f9a20a9d..1787a1de9ff2 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeWriteDatabricksCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeWriteDatabricksCompatibility.java @@ -35,7 +35,7 @@ import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure; import static io.trino.testing.TestingNames.randomNameSuffix; import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS; -import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_113; +import static io.trino.tests.product.TestGroups.DELTA_LAKE_DATABRICKS_122; import static io.trino.tests.product.TestGroups.DELTA_LAKE_OSS; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static 
io.trino.tests.product.deltalake.util.DatabricksVersion.DATABRICKS_122_RUNTIME_VERSION; @@ -323,7 +323,7 @@ public void testInsertingIntoDatabricksTableWithAddedNotNullConstraint() } } - @Test(groups = {DELTA_LAKE_DATABRICKS_113, PROFILE_SPECIFIC_TESTS}) + @Test(groups = {DELTA_LAKE_DATABRICKS_122, PROFILE_SPECIFIC_TESTS}) @Flaky(issue = DATABRICKS_COMMUNICATION_FAILURE_ISSUE, match = DATABRICKS_COMMUNICATION_FAILURE_MATCH) public void testTrinoVacuumRemoveChangeDataFeedFiles() { diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/util/DatabricksVersion.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/util/DatabricksVersion.java index 6d3c2ba8d72b..2433a5ccd23d 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/util/DatabricksVersion.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/util/DatabricksVersion.java @@ -24,8 +24,6 @@ public record DatabricksVersion(int majorVersion, int minorVersion) public static final DatabricksVersion DATABRICKS_143_RUNTIME_VERSION = new DatabricksVersion(14, 3); public static final DatabricksVersion DATABRICKS_122_RUNTIME_VERSION = new DatabricksVersion(12, 2); public static final DatabricksVersion DATABRICKS_113_RUNTIME_VERSION = new DatabricksVersion(11, 3); - public static final DatabricksVersion DATABRICKS_104_RUNTIME_VERSION = new DatabricksVersion(10, 4); - public static final DatabricksVersion DATABRICKS_91_RUNTIME_VERSION = new DatabricksVersion(9, 1); private static final Pattern DATABRICKS_VERSION_PATTERN = Pattern.compile("(\\d+)\\.(\\d+)"); diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java index 0c570f17d00e..df814c6cb20c 100644 --- 
a/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java @@ -2090,6 +2090,26 @@ public void testOptimizeOnV2IcebergTable() .containsOnly(row(1, 2), row(2, 2), row(3, 2), row(11, 12), row(12, 12), row(13, 12)); } + @Test(groups = {ICEBERG, PROFILE_SPECIFIC_TESTS}) + public void testOptimizeManifests() + { + String tableName = "test_optimize_manifests_" + randomNameSuffix(); + String sparkTableName = sparkTableName(tableName); + String trinoTableName = trinoTableName(tableName); + + onSpark().executeQuery("CREATE TABLE " + sparkTableName + "(a INT) USING ICEBERG"); + onSpark().executeQuery("INSERT INTO " + sparkTableName + " VALUES (1)"); + onSpark().executeQuery("INSERT INTO " + sparkTableName + " VALUES (2)"); + + onTrino().executeQuery("ALTER TABLE " + trinoTableName + " EXECUTE optimize_manifests"); + assertThat(onTrino().executeQuery("SELECT * FROM " + trinoTableName)) + .containsOnly(row(1), row(2)); + assertThat(onSpark().executeQuery("SELECT * FROM " + sparkTableName)) + .containsOnly(row(1), row(2)); + + onSpark().executeQuery("DROP TABLE " + sparkTableName); + } + @Test(groups = {ICEBERG, PROFILE_SPECIFIC_TESTS}) public void testAlterTableExecuteProceduresOnEmptyTable() { diff --git a/testing/trino-server-dev/pom.xml b/testing/trino-server-dev/pom.xml index 7c9cad2383fd..e63c8c65c91c 100644 --- a/testing/trino-server-dev/pom.xml +++ b/testing/trino-server-dev/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-test-jdbc-compatibility-old-driver/pom.xml b/testing/trino-test-jdbc-compatibility-old-driver/pom.xml index 44ccf7034c36..534f79c1faf3 100644 --- a/testing/trino-test-jdbc-compatibility-old-driver/pom.xml +++ b/testing/trino-test-jdbc-compatibility-old-driver/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 
471-SNAPSHOT ../../pom.xml @@ -14,7 +14,7 @@ - 470-SNAPSHOT + 471-SNAPSHOT diff --git a/testing/trino-test-jdbc-compatibility-old-server/pom.xml b/testing/trino-test-jdbc-compatibility-old-server/pom.xml index 46bc2d39692d..f40377142acc 100644 --- a/testing/trino-test-jdbc-compatibility-old-server/pom.xml +++ b/testing/trino-test-jdbc-compatibility-old-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-containers/pom.xml b/testing/trino-testing-containers/pom.xml index 3852c8d57392..f6932625b348 100644 --- a/testing/trino-testing-containers/pom.xml +++ b/testing/trino-testing-containers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-containers/src/main/java/io/trino/testing/containers/Minio.java b/testing/trino-testing-containers/src/main/java/io/trino/testing/containers/Minio.java index fc5d2756d5b2..02f7174a7625 100644 --- a/testing/trino-testing-containers/src/main/java/io/trino/testing/containers/Minio.java +++ b/testing/trino-testing-containers/src/main/java/io/trino/testing/containers/Minio.java @@ -41,7 +41,7 @@ public class Minio { private static final Logger log = Logger.get(Minio.class); - public static final String DEFAULT_IMAGE = "minio/minio:RELEASE.2025-01-20T14-49-07Z"; + public static final String DEFAULT_IMAGE = "minio/minio:RELEASE.2024-12-18T13-15-44Z"; public static final String DEFAULT_HOST_NAME = "minio"; public static final int MINIO_API_PORT = 4566; diff --git a/testing/trino-testing-kafka/pom.xml b/testing/trino-testing-kafka/pom.xml index aaf4ba35b3c9..99e87a0a2ddf 100644 --- a/testing/trino-testing-kafka/pom.xml +++ b/testing/trino-testing-kafka/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-resources/pom.xml b/testing/trino-testing-resources/pom.xml index e60b414fd2cc..262a4f278623 100644 --- 
a/testing/trino-testing-resources/pom.xml +++ b/testing/trino-testing-resources/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-services/pom.xml b/testing/trino-testing-services/pom.xml index d89ddf0faea3..d3d01fd762ea 100644 --- a/testing/trino-testing-services/pom.xml +++ b/testing/trino-testing-services/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing/pom.xml b/testing/trino-testing/pom.xml index 95dd62d1e686..7ef6dd335b2e 100644 --- a/testing/trino-testing/pom.xml +++ b/testing/trino-testing/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml @@ -18,11 +18,6 @@ jackson-annotations - - com.fasterxml.jackson.core - jackson-core - - com.google.errorprone error_prone_annotations @@ -63,6 +58,11 @@ http-server + + io.airlift + json + + io.airlift log diff --git a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java index ed5461d58d14..2c9e15b143fb 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java @@ -6605,156 +6605,6 @@ public void testColumnNames() assertThat(showCreateTableResult.getColumnNames()).isEqualTo(ImmutableList.of("Create Table")); } - @Test - public void testInlineSqlFunctions() - { - assertThat(query( - """ - WITH FUNCTION abc(x integer) RETURNS integer RETURN x * 2 - SELECT abc(21) - """)) - .matches("VALUES 42"); - assertThat(query( - """ - WITH FUNCTION abc(x integer) RETURNS integer RETURN abs(x) - SELECT abc(-21) - """)) - .matches("VALUES 21"); - - assertThat(query( - """ - WITH - FUNCTION abc(x integer) RETURNS integer RETURN x * 2, - FUNCTION xyz(x integer) RETURNS integer RETURN abc(x) + 1 - 
SELECT xyz(21) - """)) - .matches("VALUES 43"); - - assertThat(query( - """ - WITH - FUNCTION my_pow(n int, p int) - RETURNS int - BEGIN - DECLARE r int DEFAULT n; - top: LOOP - IF p <= 1 THEN - LEAVE top; - END IF; - SET r = r * n; - SET p = p - 1; - END LOOP; - RETURN r; - END - SELECT my_pow(2, 8) - """)) - .matches("VALUES 256"); - - assertThat(query( - """ - WITH - FUNCTION fun_with_uppercase_var() - RETURNS int - BEGIN - DECLARE R int DEFAULT 7; - RETURN R; - END - SELECT fun_with_uppercase_var() - """)) - .matches("VALUES 7"); - - // invoke function on data from connector to prevent constant folding on the coordinator - assertThat(query( - """ - WITH - FUNCTION my_pow(n int, p int) - RETURNS int - BEGIN - DECLARE r int DEFAULT n; - top: LOOP - IF p <= 1 THEN - LEAVE top; - END IF; - SET r = r * n; - SET p = p - 1; - END LOOP; - RETURN r; - END - SELECT my_pow(CAST(nationkey AS integer), CAST(regionkey AS integer)) FROM nation WHERE nationkey IN (1,2,3,5,8) - """)) - .matches("VALUES 1, 2, 3, 5, 64"); - - // function with dereference - assertThat(query( - """ - WITH FUNCTION get(input row(varchar)) - RETURNS varchar - RETURN input[1] - SELECT get(ROW('abc')) - """)) - .matches("VALUES VARCHAR 'abc'"); - - // validations for inline functions - assertQueryFails("WITH FUNCTION a.b() RETURNS int RETURN 42 SELECT a.b()", - "line 1:6: Inline function names cannot be qualified: a.b"); - - assertQueryFails("WITH FUNCTION x() RETURNS int SECURITY INVOKER RETURN 42 SELECT x()", - "line 1:31: Security mode not supported for inline functions"); - - assertQueryFails("WITH FUNCTION x() RETURNS bigint SECURITY DEFINER RETURN 42 SELECT x()", - "line 1:34: Security mode not supported for inline functions"); - - // error location reporting - assertQueryFails("WITH function x() RETURNS bigint DETERMINISTIC DETERMINISTIC RETURN 42 SELECT x()", - "line 1:48: Multiple deterministic clauses specified"); - - // Verify the current restrictions on inline functions are enforced - - // 
inline function can mask a global function - assertThat(query( - """ - WITH FUNCTION abs(x integer) RETURNS integer RETURN x * 2 - SELECT abs(-10) - """)) - .matches("VALUES -20"); - assertThat(query( - """ - WITH - FUNCTION abs(x integer) RETURNS integer RETURN x * 2, - FUNCTION wrap_abs(x integer) RETURNS integer RETURN abs(x) - SELECT wrap_abs(-10) - """)) - .matches("VALUES -20"); - - // inline function can have the same name as a global function with a different signature - assertThat(query( - """ - WITH FUNCTION abs(x varchar) RETURNS varchar RETURN reverse(x) - SELECT abs('abc') - """)) - .skippingTypesCheck() - .matches("VALUES 'cba'"); - - // inline functions must be declared before they are used - assertThat(query( - """ - WITH - FUNCTION a(x integer) RETURNS integer RETURN b(x), - FUNCTION b(x integer) RETURNS integer RETURN x * 2 - SELECT a(10) - """)) - .failure().hasMessage("line 2:48: Function 'b' not registered"); - - // inline function cannot be recursive - // note: mutual recursion is not supported either, but it is not tested due to the forward declaration limitation above - assertThat(query( - """ - WITH FUNCTION a(x integer) RETURNS integer RETURN a(x) - SELECT a(10) - """)) - .failure().hasMessage("line 1:6: Recursive language functions are not supported: a(integer):integer"); - } - @Test public void testCreateFunctionErrorReporting() { diff --git a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestingTrinoClient.java b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestingTrinoClient.java index 279329a66d10..cfa4092b7433 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestingTrinoClient.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestingTrinoClient.java @@ -140,7 +140,7 @@ public ResultWithQueryId execute(Session session, @Language("SQL") String sql throw new QueryFailedException(queryId, "Query failed: " + results.getError().getMessage()); // dump query info 
to console for debugging (NOTE: not pretty printed) - // JsonCodec queryInfoJsonCodec = createCodecFactory().prettyPrint().jsonCodec(QueryInfo.class); + // TrinoJsonCodec queryInfoJsonCodec = createCodecFactory().prettyPrint().jsonCodec(QueryInfo.class); // log.info("\n" + queryInfoJsonCodec.toJson(queryInfo)); } } diff --git a/testing/trino-testing/src/main/java/io/trino/testing/DistributedQueryRunner.java b/testing/trino-testing/src/main/java/io/trino/testing/DistributedQueryRunner.java index df29a287aa8e..beec799a308d 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/DistributedQueryRunner.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/DistributedQueryRunner.java @@ -911,7 +911,7 @@ public DistributedQueryRunner build() addExtraProperty("protocol.spooling.enabled", "true"); // create smaller number of segments addExtraProperty("protocol.spooling.initial-segment-size", "16MB"); - addExtraProperty("protocol.spooling.maximum-segment-size", "32MB"); + addExtraProperty("protocol.spooling.max-segment-size", "32MB"); addExtraProperty("protocol.spooling.shared-secret-key", randomAESKey()); // LocalSpoolingManager doesn't support direct storage access addExtraProperty("protocol.spooling.retrieval-mode", "coordinator_proxy"); diff --git a/testing/trino-testing/src/main/java/io/trino/testing/H2QueryRunner.java b/testing/trino-testing/src/main/java/io/trino/testing/H2QueryRunner.java index 059e7784e887..63dc6df447ae 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/H2QueryRunner.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/H2QueryRunner.java @@ -190,6 +190,7 @@ public void close() public MaterializedResult execute(Session session, @Language("SQL") String sql, List resultTypes) { MaterializedResult materializedRows = new MaterializedResult( + Optional.of(session), handle.setSqlParser(new RawSqlParser()) .setTemplateEngine((template, context) -> template) .createQuery(sql) diff --git 
a/testing/trino-testing/src/main/java/io/trino/testing/LocalSpoolingManager.java b/testing/trino-testing/src/main/java/io/trino/testing/LocalSpoolingManager.java index 2eec9f6534f7..ad2550507ec5 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/LocalSpoolingManager.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/LocalSpoolingManager.java @@ -16,10 +16,9 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.core.JsonProcessingException; import com.google.errorprone.annotations.DoNotCall; +import io.airlift.json.JsonCodec; import io.airlift.slice.Slice; -import io.trino.client.JsonCodec; import io.trino.spi.Plugin; import io.trino.spi.spool.SpooledLocation; import io.trino.spi.spool.SpooledLocation.DirectLocation; @@ -46,9 +45,10 @@ import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.io.MoreFiles.deleteRecursively; +import static io.airlift.json.JsonCodec.jsonCodec; import static io.airlift.slice.Slices.utf8Slice; -import static io.trino.client.JsonCodec.jsonCodec; import static java.nio.file.StandardOpenOption.CREATE_NEW; +import static java.time.temporal.ChronoUnit.MINUTES; import static java.util.Objects.requireNonNull; public class LocalSpoolingManager @@ -96,12 +96,7 @@ public InputStream openInputStream(SpooledSegmentHandle handle) @Override public SpooledSegmentHandle handle(Slice identifier, Map> headers) { - try { - return HANDLE_CODEC.fromJson(identifier.toStringUtf8()); - } - catch (JsonProcessingException e) { - throw new UncheckedIOException(e); - } + return HANDLE_CODEC.fromJson(identifier.toStringUtf8()); } @Override @@ -182,7 +177,7 @@ public LocalSpooledSegmentHandle(String encoding, Path path) @Override public Instant expirationTime() { - return Instant.MAX; + return Instant.now().plus(5, MINUTES); } @JsonIgnore diff --git 
a/testing/trino-testing/src/main/java/io/trino/testing/TestingTrinoClient.java b/testing/trino-testing/src/main/java/io/trino/testing/TestingTrinoClient.java index 5a1aa90031c5..58e5bda680e3 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/TestingTrinoClient.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/TestingTrinoClient.java @@ -76,6 +76,7 @@ import static io.trino.type.JsonType.JSON; import static io.trino.util.MoreLists.mappedCopy; import static java.time.temporal.ChronoField.NANO_OF_SECOND; +import static java.util.Objects.requireNonNull; import static java.util.stream.Collectors.toList; public class TestingTrinoClient @@ -119,14 +120,14 @@ public TestingTrinoClient(TestingTrinoServer trinoServer, TestingStatementClient @Override protected ResultsSession getResultSession(Session session) { - return new MaterializedResultSession(); + return new MaterializedResultSession(session); } private class MaterializedResultSession implements ResultsSession { + private final Session session; private final ImmutableList.Builder rows = ImmutableList.builder(); - private final AtomicReference> types = new AtomicReference<>(); private final AtomicReference> columnNames = new AtomicReference<>(); private final AtomicReference queryDataEncoding = new AtomicReference<>(); @@ -135,6 +136,11 @@ private class MaterializedResultSession private final AtomicReference> warnings = new AtomicReference<>(ImmutableList.of()); private final AtomicReference> statementStats = new AtomicReference<>(Optional.empty()); + public MaterializedResultSession(Session session) + { + this.session = requireNonNull(session, "session is null"); + } + @Override public void setUpdateType(String type) { @@ -186,6 +192,7 @@ public MaterializedResult build(Map setSessionProperties, Set + { + private Map connectorProperties = ImmutableMap.of(); + + private Builder() + { + super(testSessionBuilder() + .setCatalog("tpch") + .setSchema("tiny") + .build()); + } + + 
@CanIgnoreReturnValue + public Builder withConnectorProperties(Map connectorProperties) + { + this.connectorProperties = ImmutableMap.copyOf(connectorProperties); + return this; + } + + @Override + public DistributedQueryRunner build() + throws Exception + { + DistributedQueryRunner queryRunner = super.build(); + try { + queryRunner.installPlugin(new TpchPlugin()); + queryRunner.createCatalog("tpch", "tpch", connectorProperties); + return queryRunner; + } + catch (Exception e) { + queryRunner.close(); + throw e; + } + } + } + + public static void main(String[] args) + throws Exception + { + DistributedQueryRunner queryRunner = builder() + .addCoordinatorProperty("web-ui.preview.enabled", "true") + .addCoordinatorProperty("http-server.http.port", "8080") + .addCoordinatorProperty("web-ui.authentication.type", "fixed") + .addCoordinatorProperty("web-ui.user", "webapp-preview-user") + .withProtocolSpooling("json") + .build(); + + Logger log = Logger.get(WebUiPreviewQueryRunner.class); + log.info("======== SERVER STARTED ========"); + log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl()); + log.info("\n====\nPreview UI %s/ui/preview\n====", queryRunner.getCoordinator().getBaseUrl()); + } +} diff --git a/testing/trino-tests/pom.xml b/testing/trino-tests/pom.xml index 7eeefc51f7a0..b5710022870e 100644 --- a/testing/trino-tests/pom.xml +++ b/testing/trino-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 470-SNAPSHOT + 471-SNAPSHOT ../../pom.xml