Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@

package com.facebook.presto.sql.planner;

import com.facebook.presto.Session;
import com.facebook.presto.spi.ConnectorTableHandle;
import com.facebook.presto.spi.plan.AggregationNode;
import com.facebook.presto.spi.plan.JoinDistributionType;
Expand All @@ -38,7 +37,6 @@
import java.nio.file.Paths;
import java.util.stream.Stream;

import static com.facebook.presto.SystemSessionProperties.OPTIMIZER_USE_HISTOGRAMS;
import static com.facebook.presto.spi.plan.JoinDistributionType.REPLICATED;
import static com.facebook.presto.spi.plan.JoinType.INNER;
import static com.facebook.presto.sql.Optimizer.PlanStage.OPTIMIZED_AND_VALIDATED;
Expand Down Expand Up @@ -78,42 +76,11 @@ public void test(String queryResourcePath)
assertEquals(generateQueryPlan(read(queryResourcePath)), read(getQueryPlanResourcePath(queryResourcePath)));
}

@Test(dataProvider = "getQueriesDataProvider")
public void histogramsPlansMatch(String queryResourcePath)
{
    // A query's plan generated with histogram statistics enabled should either be
    // identical to the plan generated without them, or match the dedicated
    // histogram plan resource checked in alongside the regular plan file.
    String sql = read(queryResourcePath);
    Session baseSession = getQueryRunner().getDefaultSession();
    String planWithoutHistograms = generateQueryPlan(
            sql,
            Session.builder(baseSession)
                    .setSystemProperty(OPTIMIZER_USE_HISTOGRAMS, "false")
                    .build());
    String planWithHistograms = generateQueryPlan(
            sql,
            Session.builder(baseSession)
                    .setSystemProperty(OPTIMIZER_USE_HISTOGRAMS, "true")
                    .build());
    if (planWithHistograms.equals(planWithoutHistograms)) {
        // Histograms did not change the plan; nothing further to verify.
        return;
    }
    String expectedHistogramPlan = getHistogramPlanResourcePath(getQueryPlanResourcePath(queryResourcePath));
    assertEquals(planWithHistograms, read(expectedHistogramPlan));
}

private String getQueryPlanResourcePath(String queryResourcePath)
{
    // Derive the expected-plan resource name from the query resource name:
    // "foo.sql" becomes "foo.plan.txt". The pattern is end-anchored, so at most
    // one (trailing) occurrence can match and replaceFirst is equivalent here.
    return queryResourcePath.replaceFirst("\\.sql$", ".plan.txt");
}

private String getHistogramPlanResourcePath(String regularPlanResourcePath)
{
    // Histogram-specific expected plans live in a "histogram" subdirectory
    // next to the regular plan file, keeping the same file name.
    Path regularPlan = Paths.get(regularPlanResourcePath);
    Path histogramDirectory = regularPlan.getParent().resolve("histogram");
    return histogramDirectory.resolve(regularPlan.getFileName()).toString();
}

private Path getResourceWritePath(String queryResourcePath)
{
    // Expected-plan resources are (re)generated under the module's
    // src/test/resources tree, mirroring the query resource layout.
    String planResource = getQueryPlanResourcePath(queryResourcePath);
    String sourceRoot = getSourcePath().toString();
    return Paths.get(sourceRoot, "src/test/resources", planResource);
}

public void generate()
throws Exception
{
Expand All @@ -123,24 +90,12 @@ public void generate()
.parallel()
.forEach(queryResourcePath -> {
try {
Path queryPlanWritePath = getResourceWritePath(queryResourcePath);
Path queryPlanWritePath = Paths.get(
getSourcePath().toString(),
"src/test/resources",
getQueryPlanResourcePath(queryResourcePath));
createParentDirs(queryPlanWritePath.toFile());
Session histogramSession = Session.builder(getQueryRunner().getDefaultSession())
.setSystemProperty(OPTIMIZER_USE_HISTOGRAMS, "true")
.build();
Session noHistogramSession = Session.builder(getQueryRunner().getDefaultSession())
.setSystemProperty(OPTIMIZER_USE_HISTOGRAMS, "false")
.build();
String sql = read(queryResourcePath);
String regularPlan = generateQueryPlan(sql, noHistogramSession);
String histogramPlan = generateQueryPlan(sql, histogramSession);
write(regularPlan.getBytes(UTF_8), queryPlanWritePath.toFile());
// write out the histogram plan if it differs
if (!regularPlan.equals(histogramPlan)) {
Path histogramPlanWritePath = getResourceWritePath(getHistogramPlanResourcePath(queryResourcePath));
createParentDirs(histogramPlanWritePath.toFile());
write(histogramPlan.getBytes(UTF_8), histogramPlanWritePath.toFile());
}
write(generateQueryPlan(read(queryResourcePath)).getBytes(UTF_8), queryPlanWritePath.toFile());
System.out.println("Generated expected plan for query: " + queryResourcePath);
}
catch (IOException e) {
Expand All @@ -164,16 +119,11 @@ private static String read(String resource)
}

// Convenience overload: plans the query using the query runner's default session.
private String generateQueryPlan(String query)
{
return generateQueryPlan(query, getQueryRunner().getDefaultSession());
}

private String generateQueryPlan(String query, Session session)
{
String sql = query.replaceAll("\\s+;\\s+$", "")
.replace("${database}.${schema}.", "")
.replace("\"${database}\".\"${schema}\".\"${prefix}", "\"");
Plan plan = plan(session, sql, OPTIMIZED_AND_VALIDATED, false);
Plan plan = plan(sql, OPTIMIZED_AND_VALIDATED, false);

JoinOrderPrinter joinOrderPrinter = new JoinOrderPrinter();
plan.getRoot().accept(joinOrderPrinter, 0);
Expand Down

This file was deleted.

17 changes: 6 additions & 11 deletions presto-docs/src/main/sphinx/connector/iceberg.rst
Original file line number Diff line number Diff line change
Expand Up @@ -225,13 +225,10 @@ Property Name Description

``iceberg.enable-parquet-dereference-pushdown`` Enable parquet dereference pushdown. ``true``

``iceberg.hive-statistics-merge-strategy`` Comma separated list of statistics to use from the
Hive Metastore to override Iceberg table statistics.
The available values are ``NUMBER_OF_DISTINCT_VALUES``
and ``TOTAL_SIZE_IN_BYTES``.

**Note**: Only valid when the Iceberg connector is
configured with Hive.
``iceberg.hive-statistics-merge-strategy`` Determines how to merge statistics that are stored in the ``NONE``
Hive Metastore. The available values are ``NONE``,
``USE_NULLS_FRACTION_AND_NDV``, ``USE_NULLS_FRACTIONS``
and ``USE_NDV``.

``iceberg.statistic-snapshot-record-difference-weight`` The amount that the difference in total record count matters
when calculating the closest snapshot when picking
Expand Down Expand Up @@ -309,8 +306,6 @@ Property Name Description
============================================= ======================================================================
``iceberg.delete_as_join_rewrite_enabled`` Overrides the behavior of the connector property
``iceberg.delete-as-join-rewrite-enabled`` in the current session.
``iceberg.hive_statistics_merge_strategy`` Overrides the behavior of the connector property
``iceberg.hive-statistics-merge-strategy`` in the current session.
============================================= ======================================================================

Caching Support
Expand Down Expand Up @@ -1177,7 +1172,7 @@ each Iceberg data type to the corresponding Presto data type, and from each Pres
The following tables detail the specific type maps between PrestoDB and Iceberg.

Iceberg to PrestoDB type mapping
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Map of Iceberg types to the relevant PrestoDB types:

Expand Down Expand Up @@ -1220,7 +1215,7 @@ Map of Iceberg types to the relevant PrestoDB types:
No other types are supported.

PrestoDB to Iceberg type mapping
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Map of PrestoDB types to the relevant Iceberg types:

Expand Down
Loading