Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
89 commits
Select commit Hold shift + click to select a range
b924d04
Move getBucketCount to LocalExchange
gaurav8297 Jan 6, 2023
9aabe39
Make hash partition count adaptive
gaurav8297 Jan 24, 2023
55a1239
Add metric to measure requests duration of HttpPageBufferClient
radek-kondziolka Jan 12, 2023
ddeea3a
Update examples in connector docs
sheajamba Dec 21, 2022
6af6b77
Update connector docs example names
sheajamba Dec 27, 2022
ed35936
Update connector docs example names
sheajamba Jan 4, 2023
3e1318c
Update connector docs example names
sheajamba Jan 4, 2023
db35297
Update connector docs example names
sheajamba Jan 4, 2023
56f3e99
Document shared secrets security verification
sheajamba Dec 14, 2022
84e1880
Add description to keystore and truststore types
Jessie212 Oct 26, 2022
efc22f0
Add parquet.max-read-block-row-count property
Jessie212 Jan 5, 2023
8c1fb84
Add denyExecuteFunction extraInfo overloadings
djsagain Nov 15, 2022
5d0324d
Add column add/drop/rename events
djsagain Nov 28, 2022
76460b3
Make NestedLoopOutputIterator sealed
pettyjamesm Jan 20, 2023
8e4b797
Remove redundant if condition
sopel39 Jan 23, 2023
90853e1
Stop RetryDriver when interrupted
pettyjamesm Jan 21, 2023
c1a3e1a
Avoid TaskRunner destruction on cancel
pettyjamesm Jan 21, 2023
72fde16
Update TrinoFileSystemCache to represent latest hadoop implementation
jitheshtr Jul 19, 2022
0810e63
Add BenchmarkGetFileSystem to benchmark TrinoFileSystemCache
jitheshtr Jan 17, 2023
6a9637b
Restrict user to drop a column that is used in older partition spec
krvikash Jan 19, 2023
0ba5c09
Restrict user to drop a void partition column in iceberg connector
krvikash Jan 19, 2023
1df4ea7
Update code using Map-of-Map
findepi Jan 23, 2023
92be2dc
Optimize null representation in encoded VariableBlockWidthBlock
radek-kondziolka Jan 18, 2023
db75056
Use worker maxWriterCount in ScaledWriterScheduler
gaurav8297 Jan 18, 2023
769b4c0
Increase task.scale-writers.max-writer-count to core count
gaurav8297 Jan 18, 2023
d503edb
Continue commits checks when one fails
findepi Jan 24, 2023
2802a98
Add CatalogHandle to ConnectorContext
lshrinivas Jan 23, 2023
d18a112
Provide extra extension point for adding column in BaseJdbcClient
SemionPar Jan 20, 2023
f2fa1ce
Static import isDeterministic in StatementAnalyzer
ebyhr Jan 18, 2023
8dc6f48
Add support for CHECK constraint in INSERT statement
ebyhr Nov 2, 2022
23dca82
Add anchors to Hive S3 documentation
Jan 25, 2023
84a0f47
Improve metastore stats cache tests
sopel39 Jan 23, 2023
332febd
Use bulk loading in EmptyCache
sopel39 Jan 24, 2023
dfbb3b2
Put dynamic filtering comment before right PredicatePushDown call
sopel39 Jan 25, 2023
68fdc35
Add more row type test cases for changing column types
ebyhr Jan 23, 2023
7501170
Add support for changing row type in Iceberg
ebyhr Jan 23, 2023
8908701
Reuse PagePartitioner's across drivers
lukasz-stec Jan 4, 2023
f1e1ea6
Add Trino 406 release notes
colebow Jan 5, 2023
bdbee79
[maven-release-plugin] prepare release 406
martint Jan 25, 2023
dbd58bf
[maven-release-plugin] prepare for next development iteration
martint Jan 25, 2023
a8b2439
Enable register_table procedure in Iceberg REST tests
ebyhr Jan 23, 2023
b9fa334
Improve error message for register_table in Iceberg JDBC catalog
ebyhr Jan 23, 2023
449a5e2
Fix spelling in FTE docs
mosabua Jan 25, 2023
6454a8e
Remove unsupported properties
mosabua Jan 25, 2023
9d2c6ab
Unified Materialized View Table Type in information_schema
jklamer Dec 8, 2022
e0fa548
Refactor BINARY backed long decimal decoder in parquet reader
raunaqmorarka Jan 12, 2023
6798570
Support reading BINARY backed short decimals in batched parquet reader
Jan 12, 2023
2f96035
Support reading INT64 into smaller integer types in batched parquet
raunaqmorarka Jan 12, 2023
6e896c6
Support rescaled long decimals in batched parquet reader
Jan 12, 2023
7bf52f5
Support rescaled short decimals in batched parquet reader
Jan 12, 2023
33ba9cb
Support reading zero scale decimals as integers in batched parquet
raunaqmorarka Jan 13, 2023
ae2e013
Remove unnecessary handling of RLE, BIT_PACKED in parquet reader
raunaqmorarka Jan 18, 2023
7a8e22d
Avoid fallback to old column readers with optimized parquet
raunaqmorarka Jan 13, 2023
f91b8a5
Add unregister_table procedure to Iceberg connector
ebyhr Jan 26, 2023
ec81c15
Fix 406 release note
ebyhr Jan 26, 2023
6edfa0b
Use pattern variables
findinpath Jan 25, 2023
230e919
Unhide building logic of the `primitiveTypes` field
findinpath Jan 25, 2023
5388839
Move getter methods above the `private` `static` methods
findinpath Jan 25, 2023
61a3242
Remove redundant BaseIcebergMaterializedViewTest.getSchemaName method
findepi Jan 24, 2023
b193764
Update test names to follow convention
findepi Jan 24, 2023
c7aa854
Improve BaseIcebergMaterializedViewTest code style
findepi Jan 25, 2023
f81d7a5
Disallow using of single-entry loader in bulk cache
sopel39 Jan 26, 2023
04d80b9
Fetch table stats directly from delegate metastore
sopel39 Jan 26, 2023
4ab1db3
Update to Airbase 133
electrum Jan 23, 2023
a6b4def
Update discovery server to 1.32
electrum Jan 20, 2023
ce9bf49
Update to Jetty 10
electrum Jan 13, 2023
e27dae2
Fix field name typo in TestCreateMaterializedViewTask
findepi Jan 23, 2023
0e4f36e
Add @Language annotation in AbstractTestQueryFramework
findepi Jan 25, 2023
87d071f
Fix code indentation
findepi Jan 25, 2023
13acdb7
Remove legacy MaterializedViewFreshness constructor
findepi Jan 23, 2023
c4fe7d2
Fix check-commits task names
findepi Jan 24, 2023
7ae1b61
Remove outdated SPI revapi exclusions
findepi Jan 26, 2023
9661c2c
Call purgeTable in Iceberg REST catalog
ebyhr Jan 26, 2023
7a688df
Fix typo
krvikash Jan 26, 2023
5543197
Add temporal type constants and canonicalize
findepi Jan 25, 2023
73a445d
Remove dangerous fallback handling of unknown type in Redis
findepi Jan 25, 2023
88c5d38
Remove unnecessary code block
findepi Jan 25, 2023
b0d8efa
Update usages of deprecated constant
findepi Jan 25, 2023
cb2078d
Fix precision loss when coercing TIME to TIME TZ
findepi Jan 26, 2023
84be870
Fix abundant precision when coercing DATE to TIMESTAMP TZ
findepi Jan 26, 2023
ca3b8d5
Attach query when plan assertion fails
findepi Jan 24, 2023
2975a56
Prevent accidental modifications of analysis masks and filters
findepi Jan 17, 2023
65fc142
Annotate PlanMatchPattern method arguments as SQL
findepi Jan 25, 2023
0906b06
Encapsulate ExpressionMatcher construction
findepi Jan 25, 2023
33c4127
Test unwrapping within nested expressions
findepi Jan 24, 2023
36afd1e
Add more null filter related tests in TestLogicalPlanner
findepi Jan 24, 2023
8e60663
Add coercions for row filters
findepi Jan 17, 2023
1d5e057
Apply coercions when creating FilterNode
findepi Jan 24, 2023
ddcbc48
Add task.scale-writers.max-nodes-count property to limit writer scaling
radek-kondziolka Jan 27, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
15 changes: 8 additions & 7 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ jobs:
if: steps.cache.outputs.cache-hit != 'true'
run: rm -rf ~/.m2/repository/io/trino/trino-*

check-commits:
check-commits-dispatcher:
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
outputs:
Expand All @@ -101,12 +101,12 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0 # checkout all commits to be able to determine merge base
- name: Check Commits
- name: Block illegal commits
uses: trinodb/github-actions/block-commits@c2991972560c5219d9ae5fb68c0c9d687ffcdd10
with:
action-merge: fail
action-fixup: none
- name: Set matrix
- name: Set matrix (dispatch commit checks)
id: set-matrix
run: |
# The output from rev-list ends with a newline, so we have to filter out index -1 in jq since it's an empty string
Expand All @@ -125,12 +125,13 @@ jobs:
echo "Commit matrix: $(jq '.' commit-matrix.json)"
echo "matrix=$(jq -c '.' commit-matrix.json)" >> $GITHUB_OUTPUT

check-commits-dispatcher:
check-commit:
runs-on: ubuntu-latest
needs: check-commits
if: github.event_name == 'pull_request' && needs.check-commits.outputs.matrix != ''
needs: check-commits-dispatcher
if: github.event_name == 'pull_request' && needs.check-commits-dispatcher.outputs.matrix != ''
strategy:
matrix: ${{ fromJson(needs.check-commits.outputs.matrix) }}
fail-fast: false
matrix: ${{ fromJson(needs.check-commits-dispatcher.outputs.matrix) }}
steps:
- uses: actions/checkout@v3
with:
Expand Down
2 changes: 1 addition & 1 deletion client/trino-cli/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<parent>
<groupId>io.trino</groupId>
<artifactId>trino-root</artifactId>
<version>406-SNAPSHOT</version>
<version>407-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>

Expand Down
2 changes: 1 addition & 1 deletion client/trino-client/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<parent>
<groupId>io.trino</groupId>
<artifactId>trino-root</artifactId>
<version>406-SNAPSHOT</version>
<version>407-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>

Expand Down
2 changes: 1 addition & 1 deletion client/trino-jdbc/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<parent>
<groupId>io.trino</groupId>
<artifactId>trino-root</artifactId>
<version>406-SNAPSHOT</version>
<version>407-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -813,7 +813,7 @@ public void testGetColumns()
assertColumnSpec(rs, Types.TIME, 15L, null, 6L, null, createTimeType(6));
assertColumnSpec(rs, Types.TIME, 18L, null, 9L, null, createTimeType(9));
assertColumnSpec(rs, Types.TIME, 21L, null, 12L, null, createTimeType(12));
assertColumnSpec(rs, Types.TIME_WITH_TIMEZONE, 18L, null, 3L, null, TimeWithTimeZoneType.TIME_WITH_TIME_ZONE);
assertColumnSpec(rs, Types.TIME_WITH_TIMEZONE, 18L, null, 3L, null, TimeWithTimeZoneType.TIME_TZ_MILLIS);
assertColumnSpec(rs, Types.TIME_WITH_TIMEZONE, 14L, null, 0L, null, createTimeWithTimeZoneType(0));
assertColumnSpec(rs, Types.TIME_WITH_TIMEZONE, 18L, null, 3L, null, createTimeWithTimeZoneType(3));
assertColumnSpec(rs, Types.TIME_WITH_TIMEZONE, 21L, null, 6L, null, createTimeWithTimeZoneType(6));
Expand Down
2 changes: 1 addition & 1 deletion core/trino-main/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<parent>
<groupId>io.trino</groupId>
<artifactId>trino-root</artifactId>
<version>406-SNAPSHOT</version>
<version>407-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,8 @@ public final class SystemSessionProperties
public static final String JOIN_MAX_BROADCAST_TABLE_SIZE = "join_max_broadcast_table_size";
public static final String JOIN_MULTI_CLAUSE_INDEPENDENCE_FACTOR = "join_multi_clause_independence_factor";
public static final String DISTRIBUTED_INDEX_JOIN = "distributed_index_join";
public static final String HASH_PARTITION_COUNT = "hash_partition_count";
public static final String MAX_HASH_PARTITION_COUNT = "max_hash_partition_count";
public static final String MIN_HASH_PARTITION_COUNT = "min_hash_partition_count";
public static final String PREFER_STREAMING_OPERATORS = "prefer_streaming_operators";
public static final String TASK_WRITER_COUNT = "task_writer_count";
public static final String TASK_PARTITIONED_WRITER_COUNT = "task_partitioned_writer_count";
Expand All @@ -80,6 +81,7 @@ public final class SystemSessionProperties
public static final String PREFERRED_WRITE_PARTITIONING_MIN_NUMBER_OF_PARTITIONS = "preferred_write_partitioning_min_number_of_partitions";
public static final String SCALE_WRITERS = "scale_writers";
public static final String TASK_SCALE_WRITERS_ENABLED = "task_scale_writers_enabled";
public static final String MAX_WRITERS_NODES_COUNT = "max_writers_nodes_count";
public static final String TASK_SCALE_WRITERS_MAX_WRITER_COUNT = "task_scale_writers_max_writer_count";
public static final String WRITER_MIN_SIZE = "writer_min_size";
public static final String PUSH_TABLE_WRITE_THROUGH_UNION = "push_table_write_through_union";
Expand Down Expand Up @@ -173,10 +175,13 @@ public final class SystemSessionProperties
public static final String ADAPTIVE_PARTIAL_AGGREGATION_MIN_ROWS = "adaptive_partial_aggregation_min_rows";
public static final String ADAPTIVE_PARTIAL_AGGREGATION_UNIQUE_ROWS_RATIO_THRESHOLD = "adaptive_partial_aggregation_unique_rows_ratio_threshold";
public static final String JOIN_PARTITIONED_BUILD_MIN_ROW_COUNT = "join_partitioned_build_min_row_count";
public static final String MIN_INPUT_SIZE_PER_TASK = "min_input_size_per_task";
public static final String MIN_INPUT_ROWS_PER_TASK = "min_input_rows_per_task";
public static final String USE_EXACT_PARTITIONING = "use_exact_partitioning";
public static final String FORCE_SPILLING_JOIN = "force_spilling_join";
public static final String FAULT_TOLERANT_EXECUTION_EVENT_DRIVEN_SCHEDULER_ENABLED = "fault_tolerant_execution_event_driven_scheduler_enabled";
public static final String FAULT_TOLERANT_EXECUTION_FORCE_PREFERRED_WRITE_PARTITIONING_ENABLED = "fault_tolerant_execution_force_preferred_write_partitioning_enabled";
public static final String PAGE_PARTITIONING_BUFFER_POOL_SIZE = "page_partitioning_buffer_pool_size";

private final List<PropertyMetadata<?>> sessionProperties;

Expand Down Expand Up @@ -241,9 +246,14 @@ public SystemSessionProperties(
optimizerConfig.isDistributedIndexJoinsEnabled(),
false),
integerProperty(
HASH_PARTITION_COUNT,
"Number of partitions for distributed joins and aggregations",
queryManagerConfig.getHashPartitionCount(),
MAX_HASH_PARTITION_COUNT,
"Maximum number of partitions for distributed joins and aggregations",
queryManagerConfig.getMaxHashPartitionCount(),
false),
integerProperty(
MIN_HASH_PARTITION_COUNT,
"Minimum number of partitions for distributed joins and aggregations",
queryManagerConfig.getMinHashPartitionCount(),
false),
booleanProperty(
PREFER_STREAMING_OPERATORS,
Expand Down Expand Up @@ -286,6 +296,11 @@ public SystemSessionProperties(
"Scale out writers based on throughput (use minimum necessary)",
featuresConfig.isScaleWriters(),
false),
integerProperty(
MAX_WRITERS_NODES_COUNT,
"Set upper limit on number of nodes that take part in writing if task.scale-writers.enabled is set",
queryManagerConfig.getMaxWritersNodesCount(),
false),
booleanProperty(
TASK_SCALE_WRITERS_ENABLED,
"Scale the number of concurrent table writers per task based on throughput",
Expand Down Expand Up @@ -860,6 +875,16 @@ public SystemSessionProperties(
optimizerConfig.getJoinPartitionedBuildMinRowCount(),
value -> validateNonNegativeLongValue(value, JOIN_PARTITIONED_BUILD_MIN_ROW_COUNT),
false),
dataSizeProperty(
MIN_INPUT_SIZE_PER_TASK,
"Minimum input data size required per task. This will help optimizer determine hash partition count for joins and aggregations",
optimizerConfig.getMinInputSizePerTask(),
false),
longProperty(
MIN_INPUT_ROWS_PER_TASK,
"Minimum input rows required per task. This will help optimizer determine hash partition count for joins and aggregations",
optimizerConfig.getMinInputRowsPerTask(),
false),
booleanProperty(
USE_EXACT_PARTITIONING,
"When enabled this forces data repartitioning unless the partitioning of upstream stage matches exactly what downstream stage expects",
Expand All @@ -879,6 +904,10 @@ public SystemSessionProperties(
FAULT_TOLERANT_EXECUTION_FORCE_PREFERRED_WRITE_PARTITIONING_ENABLED,
"Force preferred write partitioning for fault tolerant execution",
queryManagerConfig.isFaultTolerantExecutionForcePreferredWritePartitioningEnabled(),
true),
integerProperty(PAGE_PARTITIONING_BUFFER_POOL_SIZE,
"Maximum number of free buffers in the per task partitioned page buffer pool. Setting this to zero effectively disables the pool",
taskManagerConfig.getPagePartitioningBufferPoolSize(),
true));
}

Expand Down Expand Up @@ -918,9 +947,14 @@ public static boolean isDistributedIndexJoinEnabled(Session session)
return session.getSystemProperty(DISTRIBUTED_INDEX_JOIN, Boolean.class);
}

public static int getHashPartitionCount(Session session)
public static int getMaxHashPartitionCount(Session session)
{
return session.getSystemProperty(MAX_HASH_PARTITION_COUNT, Integer.class);
}

public static int getMinHashPartitionCount(Session session)
{
return session.getSystemProperty(HASH_PARTITION_COUNT, Integer.class);
return session.getSystemProperty(MIN_HASH_PARTITION_COUNT, Integer.class);
}

public static boolean preferStreamingOperators(Session session)
Expand Down Expand Up @@ -968,6 +1002,11 @@ public static int getTaskScaleWritersMaxWriterCount(Session session)
return session.getSystemProperty(TASK_SCALE_WRITERS_MAX_WRITER_COUNT, Integer.class);
}

public static int getMaxWritersNodesCount(Session session)
{
return session.getSystemProperty(MAX_WRITERS_NODES_COUNT, Integer.class);
}

public static DataSize getWriterMinSize(Session session)
{
return session.getSystemProperty(WRITER_MIN_SIZE, DataSize.class);
Expand Down Expand Up @@ -1548,6 +1587,16 @@ public static long getJoinPartitionedBuildMinRowCount(Session session)
return session.getSystemProperty(JOIN_PARTITIONED_BUILD_MIN_ROW_COUNT, Long.class);
}

public static DataSize getMinInputSizePerTask(Session session)
{
return session.getSystemProperty(MIN_INPUT_SIZE_PER_TASK, DataSize.class);
}

public static long getMinInputRowsPerTask(Session session)
{
return session.getSystemProperty(MIN_INPUT_ROWS_PER_TASK, Long.class);
}

public static boolean isUseExactPartitioning(Session session)
{
return session.getSystemProperty(USE_EXACT_PARTITIONING, Boolean.class);
Expand All @@ -1571,4 +1620,9 @@ public static boolean isFaultTolerantExecutionForcePreferredWritePartitioningEna
}
return session.getSystemProperty(FAULT_TOLERANT_EXECUTION_FORCE_PREFERRED_WRITE_PARTITIONING_ENABLED, Boolean.class);
}

public static int getPagePartitioningBufferPoolSize(Session session)
{
return session.getSystemProperty(PAGE_PARTITIONING_BUFFER_POOL_SIZE, Integer.class);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import io.trino.spi.PageIndexerFactory;
import io.trino.spi.PageSorter;
import io.trino.spi.VersionEmbedder;
import io.trino.spi.connector.CatalogHandle;
import io.trino.spi.connector.ConnectorContext;
import io.trino.spi.connector.MetadataProvider;
import io.trino.spi.type.TypeManager;
Expand All @@ -38,8 +39,10 @@ public class ConnectorContextInstance
private final PageIndexerFactory pageIndexerFactory;
private final Supplier<ClassLoader> duplicatePluginClassLoaderFactory;
private final AtomicBoolean pluginClassLoaderDuplicated = new AtomicBoolean();
private final CatalogHandle catalogHandle;

public ConnectorContextInstance(
CatalogHandle catalogHandle,
NodeManager nodeManager,
VersionEmbedder versionEmbedder,
TypeManager typeManager,
Expand All @@ -55,6 +58,13 @@ public ConnectorContextInstance(
this.pageSorter = requireNonNull(pageSorter, "pageSorter is null");
this.pageIndexerFactory = requireNonNull(pageIndexerFactory, "pageIndexerFactory is null");
this.duplicatePluginClassLoaderFactory = requireNonNull(duplicatePluginClassLoaderFactory, "duplicatePluginClassLoaderFactory is null");
this.catalogHandle = requireNonNull(catalogHandle, "catalogHandle is null");
}

@Override
public CatalogHandle getCatalogHandle()
{
return catalogHandle;
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,7 @@ private Connector createConnector(
Map<String, String> properties)
{
ConnectorContext context = new ConnectorContextInstance(
catalogHandle,
new ConnectorAwareNodeManager(nodeManager, nodeInfo.getEnvironment(), catalogHandle, schedulerIncludeCoordinator),
versionEmbedder,
typeManager,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
*/
package io.trino.connector;

import com.google.common.collect.ImmutableList;
import io.trino.FullConnectorSession;
import io.trino.Session;
import io.trino.metadata.MaterializedViewDefinition;
Expand Down Expand Up @@ -54,12 +55,12 @@ public Optional<ConnectorTableSchema> getRelationMetadata(ConnectorSession conne

Optional<MaterializedViewDefinition> materializedView = metadata.getMaterializedView(session, qualifiedName);
if (materializedView.isPresent()) {
return Optional.of(new ConnectorTableSchema(tableName.getSchemaTableName(), toColumnSchema(materializedView.get().getColumns())));
return Optional.of(new ConnectorTableSchema(tableName.getSchemaTableName(), toColumnSchema(materializedView.get().getColumns()), ImmutableList.of()));
}

Optional<ViewDefinition> view = metadata.getView(session, qualifiedName);
if (view.isPresent()) {
return Optional.of(new ConnectorTableSchema(tableName.getSchemaTableName(), toColumnSchema(view.get().getColumns())));
return Optional.of(new ConnectorTableSchema(tableName.getSchemaTableName(), toColumnSchema(view.get().getColumns()), ImmutableList.of()));
}

Optional<TableHandle> tableHandle = metadata.getTableHandle(session, qualifiedName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
import static io.trino.connector.informationschema.InformationSchemaMetadata.defaultPrefixes;
import static io.trino.connector.informationschema.InformationSchemaMetadata.isTablesEnumeratingTable;
import static io.trino.metadata.MetadataListing.getViews;
import static io.trino.metadata.MetadataListing.listMaterializedViews;
import static io.trino.metadata.MetadataListing.listSchemas;
import static io.trino.metadata.MetadataListing.listTableColumns;
import static io.trino.metadata.MetadataListing.listTablePrivileges;
Expand Down Expand Up @@ -271,12 +272,18 @@ private void addColumnsRecords(QualifiedTablePrefix prefix)
private void addTablesRecords(QualifiedTablePrefix prefix)
{
Set<SchemaTableName> tables = listTables(session, metadata, accessControl, prefix);
Set<SchemaTableName> materializedViews = listMaterializedViews(session, metadata, accessControl, prefix);
Set<SchemaTableName> views = listViews(session, metadata, accessControl, prefix);
// TODO (https://github.com/trinodb/trino/issues/8207) define a type for materialized views

for (SchemaTableName name : union(tables, views)) {
for (SchemaTableName name : union(union(tables, materializedViews), views)) {
// if table and view names overlap, the view wins
String type = views.contains(name) ? "VIEW" : "BASE TABLE";
String type = "BASE TABLE";
if (materializedViews.contains(name)) {
type = "MATERIALIZED VIEW";
}
else if (views.contains(name)) {
type = "VIEW";
}
addRecord(
prefix.getCatalogName(),
name.getSchemaName(),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@

import static io.trino.SystemSessionProperties.getCostEstimationWorkerCount;
import static io.trino.SystemSessionProperties.getFaultTolerantExecutionPartitionCount;
import static io.trino.SystemSessionProperties.getHashPartitionCount;
import static io.trino.SystemSessionProperties.getMaxHashPartitionCount;
import static io.trino.SystemSessionProperties.getRetryPolicy;
import static java.lang.Math.min;
import static java.lang.Math.toIntExact;
Expand Down Expand Up @@ -73,7 +73,7 @@ public int estimateHashedTaskCount(Session session)
partitionCount = getFaultTolerantExecutionPartitionCount(session);
}
else {
partitionCount = getHashPartitionCount(session);
partitionCount = getMaxHashPartitionCount(session);
}
return min(estimateSourceDistributedTaskCount(session), partitionCount);
}
Expand Down
Loading