Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ public void testBroadcastSpatialJoinIntersects()
}

@Test
public void tesDistributedSpatialJoinIntersects()
public void testDistributedSpatialJoinIntersects()
{
assertUpdate(format("CREATE TABLE intersects_partitioning AS " +
"SELECT spatial_partitioning(ST_GeometryFromText(wkt)) as v " +
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1148,22 +1148,18 @@ private void prepareAddTable(MetastoreContext metastoreContext, HdfsContext cont
checkArgument(!targetLocation.isEmpty(), "target location is empty");
Optional<Path> currentPath = tableAndMore.getCurrentLocation();
Path targetPath = new Path(targetLocation);
if (table.getPartitionColumns().isEmpty() && currentPath.isPresent()) {
// CREATE TABLE AS SELECT unpartitioned table
if (targetPath.equals(currentPath.get())) {
// Target path and current path are the same. Therefore, directory move is not needed.
}
else {
renameDirectory(
context,
hdfsEnvironment,
currentPath.get(),
targetPath,
() -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true)));
}
if (table.getPartitionColumns().isEmpty() && currentPath.isPresent() && !targetPath.equals(currentPath.get())) {
// CREATE TABLE AS SELECT unpartitioned table with staging directory
renameDirectory(
context,
hdfsEnvironment,
currentPath.get(),
targetPath,
() -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true)));
}
else {
// CREATE TABLE AS SELECT partitioned table, or
// CREATE TABLE AS SELECT unpartitioned table without temporary staging directory
// CREATE TABLE partitioned/unpartitioned table (without data)
if (pathExists(context, hdfsEnvironment, targetPath)) {
if (currentPath.isPresent() && currentPath.get().equals(targetPath)) {
Expand Down Expand Up @@ -1808,7 +1804,7 @@ else if (deleteEmptyDirectories && !recursiveDeleteResult.isDirectoryNoLongerExi
* <p>
* This method will not delete anything that's neither a directory nor a file.
*
* @param queryIds prefix or suffix of files that should be deleted
* @param queryIds prefix or suffix of files that should be deleted
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

accidental change?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

was automatically reformatted by intellij. figured it was better to commit the autoformatting changes than fight with it all the time.

* @param deleteEmptyDirectories whether empty directories should be deleted
*/
private static RecursiveDeleteResult recursiveDeleteFiles(HdfsEnvironment hdfsEnvironment, HdfsContext context, Path directory, Set<String> queryIds, boolean deleteEmptyDirectories)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@
import javax.annotation.concurrent.ThreadSafe;
import javax.inject.Inject;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayDeque;
Expand Down Expand Up @@ -263,6 +264,12 @@ else if (table.getTableType().equals(EXTERNAL_TABLE)) {
throw new PrestoException(NOT_SUPPORTED, "Table type not supported: " + table.getTableType());
}

if (!table.getTableType().equals(VIRTUAL_VIEW)) {
File location = new File(new Path(table.getStorage().getLocation()).toUri());
checkArgument(location.isDirectory(), "Table location is not a directory: %s", location);
checkArgument(location.exists(), "Table directory does not exist: %s", location);
}

writeSchemaFile("table", tableMetadataDirectory, tableCodec, new TableMetadata(table), false);

for (Entry<String, Collection<HivePrivilegeInfo>> entry : principalPrivileges.getUserPrivileges().asMap().entrySet()) {
Expand Down Expand Up @@ -918,8 +925,8 @@ public synchronized List<String> getPartitionNamesByFilter(
List<String> parts = convertPredicateToParts(partitionPredicates);
// todo this should be more efficient by selectively walking the directory tree
return getPartitionNames(metastoreContext, databaseName, tableName).map(partitionNames -> partitionNames.stream()
.filter(partitionName -> partitionMatches(partitionName, parts))
.collect(toImmutableList()))
.filter(partitionName -> partitionMatches(partitionName, parts))
.collect(toImmutableList()))
.orElse(ImmutableList.of());
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ public synchronized void createTable(MetastoreContext metastoreContext, Table ta
}
else {
File directory = new File(new Path(table.getSd().getLocation()).toUri());
checkArgument(directory.exists(), "Table directory does not exist");
checkArgument(directory.exists(), "Table directory does not exist: %s", directory);
if (tableType == MANAGED_TABLE) {
checkArgument(isParentDir(directory, baseDirectory), "Table directory must be inside of the metastore base directory");
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ public final class HiveSessionProperties
private static final String OPTIMIZE_MISMATCHED_BUCKET_COUNT = "optimize_mismatched_bucket_count";
private static final String S3_SELECT_PUSHDOWN_ENABLED = "s3_select_pushdown_enabled";
public static final String SHUFFLE_PARTITIONED_COLUMNS_FOR_TABLE_WRITE = "shuffle_partitioned_columns_for_table_write";
private static final String TEMPORARY_STAGING_DIRECTORY_ENABLED = "temporary_staging_directory_enabled";
public static final String TEMPORARY_STAGING_DIRECTORY_ENABLED = "temporary_staging_directory_enabled";
private static final String TEMPORARY_STAGING_DIRECTORY_PATH = "temporary_staging_directory_path";
private static final String TEMPORARY_TABLE_SCHEMA = "temporary_table_schema";
private static final String TEMPORARY_TABLE_STORAGE_FORMAT = "temporary_table_storage_format";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,7 @@
import static com.facebook.presto.hive.HiveSessionProperties.RCFILE_OPTIMIZED_WRITER_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.SORTED_WRITE_TEMP_PATH_SUBDIRECTORY_COUNT;
import static com.facebook.presto.hive.HiveSessionProperties.SORTED_WRITE_TO_TEMP_PATH_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.TEMPORARY_STAGING_DIRECTORY_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.getInsertExistingPartitionsBehavior;
import static com.facebook.presto.hive.HiveStorageFormat.PAGEFILE;
import static com.facebook.presto.hive.HiveTableProperties.BUCKETED_BY_PROPERTY;
Expand Down Expand Up @@ -1144,6 +1145,28 @@ public void testCreateEmptyUnpartitionedBucketedTable()
assertUpdate("DROP TABLE " + tableName);
}

@Test
public void testCreateEmptyUnpartitionedBucketedTableNoStaging()
{
    // Verifies that CTAS into a bucketed, unpartitioned table succeeds when the
    // temporary staging directory is disabled and the source query yields zero rows.
    String tableName = "test_create_empty_bucketed_table_no_staging";

    // Session with the staging directory explicitly turned off, so the write
    // goes straight to the final table location.
    Session noStagingSession = Session.builder(getSession())
            .setCatalogSessionProperty(catalog, TEMPORARY_STAGING_DIRECTORY_ENABLED, "false")
            .build();

    // Bucketed CTAS whose predicate matches no customer rows (custkey < 0).
    String createTableSql =
            "CREATE TABLE " + tableName + " " +
            "WITH (" +
            " bucketed_by = ARRAY[ 'custkey' ], " +
            " bucket_count = 11 " +
            ") " +
            "AS " +
            "SELECT custkey, comment " +
            "FROM customer " +
            "WHERE custkey < 0";
    assertUpdate(noStagingSession, createTableSql, 0);

    // The table must exist and be empty; then clean up.
    assertQuery("SELECT count(*) FROM " + tableName, "SELECT 0");
    assertUpdate("DROP TABLE " + tableName);
}

@Test
public void testCreateEmptyBucketedPartition()
{
Expand Down Expand Up @@ -5371,9 +5394,9 @@ public void testCreateMaterializedView()
assertQueryFails(
"CREATE MATERIALIZED VIEW test_customer_view AS SELECT name FROM test_customer_base",
format(
".* Materialized view '%s.%s.test_customer_view' already exists",
getSession().getCatalog().get(),
getSession().getSchema().get()));
".* Materialized view '%s.%s.test_customer_view' already exists",
getSession().getCatalog().get(),
getSession().getSchema().get()));
assertQuerySucceeds("CREATE MATERIALIZED VIEW IF NOT EXISTS test_customer_view AS SELECT name FROM test_customer_base");

// Test partition mapping
Expand Down Expand Up @@ -5424,15 +5447,15 @@ public void testCreateMaterializedView()
public void testShowCreateOnMaterializedView()
{
String createMaterializedViewSql = formatSqlText(format("CREATE MATERIALIZED VIEW %s.%s.test_customer_view_1\n" +
"WITH (\n" +
" format = 'ORC'," +
" partitioned_by = ARRAY['nationkey']\n" +
retentionDays(15) +
") AS SELECT\n" +
" name\n" +
", nationkey\n" +
"FROM\n" +
" test_customer_base_1",
"WITH (\n" +
" format = 'ORC'," +
" partitioned_by = ARRAY['nationkey']\n" +
retentionDays(15) +
") AS SELECT\n" +
" name\n" +
", nationkey\n" +
"FROM\n" +
" test_customer_base_1",
getSession().getCatalog().get(),
getSession().getSchema().get()));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ public static DistributedQueryRunner createIcebergQueryRunner(Map<String, String
queryRunner.installPlugin(new IcebergPlugin());
Map<String, String> icebergProperties = ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", dataDir.toString() + "/catalog")
.put("hive.metastore.catalog.dir", catalogDir.toFile().toURI().toString())
.put("iceberg.file-format", format.name())
.build();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,28 +42,28 @@ protected QueryRunner createQueryRunner()
{
Session session = testSessionBuilder()
.setIdentity(new Identity(
"hive",
Optional.empty(),
ImmutableMap.of("hive", new SelectedRole(ROLE, Optional.of("admin"))),
ImmutableMap.of(),
ImmutableMap.of()))
"hive",
Optional.empty(),
ImmutableMap.of("hive", new SelectedRole(ROLE, Optional.of("admin"))),
ImmutableMap.of(),
ImmutableMap.of()))
.build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();

Path dataDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
Path catalogDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve("catalog");

queryRunner.installPlugin(new IcebergPlugin());
Map<String, String> icebergProperties = ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", dataDir.toString() + "/catalog")
.put("hive.metastore.catalog.dir", catalogDir.toFile().toURI().toString())
.build();

queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", icebergProperties);

queryRunner.installPlugin(new HivePlugin("hive"));
Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", dataDir.toString() + "/catalog")
.put("hive.metastore.catalog.dir", catalogDir.toFile().toURI().toString())
.put("hive.security", "sql-standard")
.build();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,12 +57,12 @@ protected QueryRunner createQueryRunner()
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");

Path dataDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
Path catalogDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve("catalog");

queryRunner.installPlugin(new IcebergPlugin());
Map<String, String> icebergProperties = ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", dataDir.toString() + "/catalog")
.put("hive.metastore.catalog.dir", catalogDir.toFile().toURI().toString())
.build();

queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", icebergProperties);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,12 +48,12 @@ protected QueryRunner createQueryRunner()
.build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();

Path dataDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
Path catalogDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").resolve("catalog");

queryRunner.installPlugin(new IcebergPlugin());
Map<String, String> icebergProperties = ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", dataDir.toString() + "/catalog")
.put("hive.metastore.catalog.dir", catalogDir.toFile().toURI().toString())
.build();

queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", icebergProperties);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ public void setupServer()
server.installPlugin(new HiveHadoop2Plugin());
server.createCatalog("hive", "hive-hadoop2", ImmutableMap.<String, String>builder()
.put("hive.metastore", "file")
.put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toAbsolutePath().toString())
.put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toFile().toURI().toString())
.put("hive.security", "sql-standard")
.build());

Expand Down