diff --git a/core/trino-spi/src/main/java/io/trino/spi/function/InvocationConvention.java b/core/trino-spi/src/main/java/io/trino/spi/function/InvocationConvention.java index a11e03f158a0..b9db00fdb52a 100644 --- a/core/trino-spi/src/main/java/io/trino/spi/function/InvocationConvention.java +++ b/core/trino-spi/src/main/java/io/trino/spi/function/InvocationConvention.java @@ -110,7 +110,7 @@ public enum InvocationArgumentConvention */ NEVER_NULL(false, 1), /** - * Argument is always an object type. A SQL null will be passed a Java null. + * Argument is always an object type. An SQL null will be passed as a Java null. */ BOXED_NULLABLE(true, 1), /** diff --git a/docs/README.md b/docs/README.md index a07481b0072f..3e99056464be 100644 --- a/docs/README.md +++ b/docs/README.md @@ -208,7 +208,7 @@ contribution](https://trino.io/development/process.html). 2. You might select a GitHub doc issue to work on that requires you to verify how Trino handles a situation, such as [adding - documentation](https://github.com/trinodb/trino/issues/7660) for a SQL + documentation](https://github.com/trinodb/trino/issues/7660) for an SQL language function. In this case, the five-minute video [Learning Trino SQL with diff --git a/docs/src/main/sphinx/appendix/from-hive.rst b/docs/src/main/sphinx/appendix/from-hive.rst index d86db6dee6e1..41b313d8ffa0 100644 --- a/docs/src/main/sphinx/appendix/from-hive.rst +++ b/docs/src/main/sphinx/appendix/from-hive.rst @@ -2,7 +2,7 @@ Migrating from Hive =================== -Trino uses ANSI SQL syntax and semantics, whereas Hive uses a SQL-like language called HiveQL which is loosely modeled after MySQL (which itself has many differences from ANSI SQL). +Trino uses ANSI SQL syntax and semantics, whereas Hive uses an SQL-like language called HiveQL which is loosely modeled after MySQL (which itself has many differences from ANSI SQL). 
Use subscript for accessing a dynamic index of an array instead of a udf ------------------------------------------------------------------------ diff --git a/docs/src/main/sphinx/connector/accumulo.rst b/docs/src/main/sphinx/connector/accumulo.rst index 9e9f3c3ca7a8..fd791512bd03 100644 --- a/docs/src/main/sphinx/connector/accumulo.rst +++ b/docs/src/main/sphinx/connector/accumulo.rst @@ -13,7 +13,7 @@ Installing the iterator dependency ---------------------------------- The Accumulo connector uses custom Accumulo iterators in -order to push various information in a SQL predicate clause to Accumulo for +order to push various information in an SQL predicate clause to Accumulo for server-side filtering, known as *predicate pushdown*. In order for the server-side iterators to work, you need to add the ``trino-accumulo-iterators`` JAR file to Accumulo's ``lib/ext`` directory on each TabletServer node. diff --git a/docs/src/main/sphinx/connector/iceberg.rst b/docs/src/main/sphinx/connector/iceberg.rst index b40376ec8db0..00bdbe954175 100644 --- a/docs/src/main/sphinx/connector/iceberg.rst +++ b/docs/src/main/sphinx/connector/iceberg.rst @@ -567,7 +567,7 @@ Use the ``$snapshots`` metadata table to determine the latest snapshot ID of the FROM iceberg.testdb."customer_orders$snapshots" ORDER BY committed_at DESC LIMIT 1 -A SQL procedure ``system.rollback_to_snapshot`` allows the caller to roll back +An SQL procedure ``system.rollback_to_snapshot`` allows the caller to roll back the state of the table to a previous snapshot id:: CALL iceberg.system.rollback_to_snapshot('testdb', 'customer_orders', 8954597067493422955) diff --git a/docs/src/main/sphinx/develop/example-http.rst b/docs/src/main/sphinx/develop/example-http.rst index 8f1c12583805..76129672dcbe 100644 --- a/docs/src/main/sphinx/develop/example-http.rst +++ b/docs/src/main/sphinx/develop/example-http.rst @@ -5,7 +5,7 @@ Example HTTP connector The Example HTTP connector has a simple goal: it reads comma-separated 
data over HTTP. For example, if you have a large amount of data in a CSV format, you can point the example HTTP connector at this data and -write a SQL query to process it. +write an SQL query to process it. Code ---- diff --git a/docs/src/main/sphinx/develop/functions.rst b/docs/src/main/sphinx/develop/functions.rst index f36279179f51..242d389ea86c 100644 --- a/docs/src/main/sphinx/develop/functions.rst +++ b/docs/src/main/sphinx/develop/functions.rst @@ -310,7 +310,7 @@ Deprecated function ------------------- The ``@Deprecated`` annotation has to be used on any function that should no longer be -used. The annotation causes Trino to generate a warning whenever a SQL statement +used. The annotation causes Trino to generate a warning whenever an SQL statement uses a deprecated function. When a function is deprecated, the ``@Description`` needs to be replaced with a note about the deprecation and the replacement function: diff --git a/docs/src/main/sphinx/functions/json.rst b/docs/src/main/sphinx/functions/json.rst index 7cc7187cc4f8..17bf03e5ec6d 100644 --- a/docs/src/main/sphinx/functions/json.rst +++ b/docs/src/main/sphinx/functions/json.rst @@ -27,7 +27,7 @@ JSON path language The JSON path language is a special language, used exclusively by certain SQL operators to specify the query to perform on the JSON input. Although JSON path -expressions are embedded in a SQL query, their syntax significantly differs +expressions are embedded in an SQL query, their syntax significantly differs from SQL. The semantics of predicates, operators, etc. in JSON path expressions generally follow the semantics of SQL. The JSON path language is case-sensitive for keywords and identifiers. @@ -37,7 +37,7 @@ for keywords and identifiers. JSON path syntax and semantics ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A JSON path expression, similar to a SQL expression, is a recursive structure. +A JSON path expression, similar to an SQL expression, is a recursive structure. 
Although the name "path" suggests a linear sequence of operations going step by step deeper into the JSON structure, a JSON path expression is in fact a tree. It can access the input JSON item multiple times, in multiple ways, and combine @@ -1492,7 +1492,7 @@ The following examples show the behavior of casting to JSON with these types:: -- JSON '{"v1":123,"v2":"abc","v3":true}' Casting from NULL to ``JSON`` is not straightforward. Casting -from a standalone ``NULL`` will produce a SQL ``NULL`` instead of +from a standalone ``NULL`` will produce an SQL ``NULL`` instead of ``JSON 'null'``. However, when casting from arrays or map containing ``NULL``\s, the produced ``JSON`` will have ``null``\s in it. diff --git a/docs/src/main/sphinx/overview/concepts.rst b/docs/src/main/sphinx/overview/concepts.rst index 28f2a3f1532d..ebb468766ec5 100644 --- a/docs/src/main/sphinx/overview/concepts.rst +++ b/docs/src/main/sphinx/overview/concepts.rst @@ -108,7 +108,7 @@ Catalog A Trino catalog contains schemas and references a data source via a connector. For example, you can configure a JMX catalog to provide -access to JMX information via the JMX connector. When you run a SQL +access to JMX information via the JMX connector. When you run an SQL statement in Trino, you are running it against one or more catalogs. Other examples of catalogs include the Hive catalog to connect to a Hive data source. @@ -154,7 +154,7 @@ expressions, and predicates. Some readers might be curious why this section lists separate concepts for statements and queries. This is necessary because, in Trino, -statements simply refer to the textual representation of a SQL +statements simply refer to the textual representation of an SQL statement. When a statement is executed, Trino creates a query along with a query plan that is then distributed across a series of Trino workers. 
diff --git a/docs/src/main/sphinx/release/release-0.113.rst b/docs/src/main/sphinx/release/release-0.113.rst index 194175a1d447..c5de343fd440 100644 --- a/docs/src/main/sphinx/release/release-0.113.rst +++ b/docs/src/main/sphinx/release/release-0.113.rst @@ -20,7 +20,7 @@ for internal Presto data structures and temporary allocations. Session properties ------------------ -All session properties now have a SQL type, default value and description. The +All session properties now have an SQL type, default value and description. The value for :doc:`/sql/set-session` can now be any constant expression, and the :doc:`/sql/show-session` command prints the current effective value and default value for all session properties. diff --git a/docs/src/main/sphinx/release/release-0.166.rst b/docs/src/main/sphinx/release/release-0.166.rst index f61e62d6bd75..78a83557b430 100644 --- a/docs/src/main/sphinx/release/release-0.166.rst +++ b/docs/src/main/sphinx/release/release-0.166.rst @@ -7,7 +7,7 @@ General * Fix failure due to implicit coercion issue in ``IN`` expressions for certain combinations of data types (e.g., ``double`` and ``decimal``). -* Add ``query.max-length`` config flag to set the maximum length of a SQL query. +* Add ``query.max-length`` config flag to set the maximum length of an SQL query. The default maximum length is 1MB. * Improve performance of :func:`approx_percentile`. diff --git a/docs/src/main/sphinx/release/release-0.205.rst b/docs/src/main/sphinx/release/release-0.205.rst index 018255353e20..563763fb3e83 100644 --- a/docs/src/main/sphinx/release/release-0.205.rst +++ b/docs/src/main/sphinx/release/release-0.205.rst @@ -74,7 +74,7 @@ SPI * Disallow non-static methods to be annotated with ``@ScalarFunction``. Non-static SQL function implementations must now be declared in a class annotated with ``@ScalarFunction``. * Disallow having multiple public constructors in ``@ScalarFunction`` classes. 
All non-static - implementations of a SQL function will now be associated with a single constructor. + implementations of an SQL function will now be associated with a single constructor. This improves support for providing specialized implementations of SQL functions with generic arguments. * Deprecate ``checkCanSelectFromTable/checkCanSelectFromView`` and ``checkCanCreateViewWithSelectFromTable/checkCanCreateViewWithSelectFromView`` in ``ConnectorAccessControl`` diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestPredicatePushdown.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestPredicatePushdown.java index ccb7cf5b8397..b41b7cea2af7 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestPredicatePushdown.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestPredicatePushdown.java @@ -138,7 +138,7 @@ public void testUpdatePushdown() /** * Assert on the number of rows read and updated by a read operation * @param actual The query to test - * @param expected The expected results as a SQL expression + * @param expected The expected results as an SQL expression * @param countProcessed The number of rows expected to be processed */ private void assertPushdown(String actual, String expected, long countProcessed) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftHiveMetastoreClient.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftHiveMetastoreClient.java index 839b64daada9..0659a3cfb1b7 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftHiveMetastoreClient.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftHiveMetastoreClient.java @@ -189,13 +189,13 @@ public List getTablesWithParameter(String databaseName, String parameter /* * The parameter value is restricted to have only alphanumeric characters so that it's safe * to 
be used against HMS. When using with a LIKE operator, the HMS may want the parameter - * value to follow a Java regex pattern or a SQL pattern. And it's hard to predict the + * value to follow a Java regex pattern or an SQL pattern. And it's hard to predict the * HMS's behavior from outside. Also, by restricting parameter values, we avoid the problem * of how to quote them when passing within the filter string. */ checkArgument(TABLE_PARAMETER_SAFE_VALUE_PATTERN.matcher(parameterValue).matches(), "Parameter value contains invalid characters: '%s'", parameterValue); /* - * Thrift call `get_table_names_by_filter` may be translated by Metastore to a SQL query against Metastore database. + * Thrift call `get_table_names_by_filter` may be translated by Metastore to an SQL query against Metastore database. * Hive 2.3 on some databases uses CLOB for table parameter value column and some databases disallow `=` predicate over * CLOB values. At the same time, they allow `LIKE` predicates over them. */ diff --git a/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/util/DatabaseUtil.java b/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/util/DatabaseUtil.java index bc22e1b8ff3a..3e6604bfb289 100644 --- a/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/util/DatabaseUtil.java +++ b/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/util/DatabaseUtil.java @@ -92,7 +92,7 @@ public static TrinoException metadataError(Throwable cause) } /** - * Run a SQL query as ignoring any constraint violations. + * Run an SQL query, ignoring any constraint violations. * This allows idempotent inserts (equivalent to INSERT IGNORE). */ public static void runIgnoringConstraintViolation(Runnable task)