diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java
index bab7898a03b4..7ce9aaf31e2e 100644
--- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java
+++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java
@@ -185,10 +185,15 @@ protected BaseHiveConnectorTest()
     protected static QueryRunner createHiveQueryRunner(Map extraProperties, Consumer additionalSetup)
             throws Exception
     {
+        // Use faster compression codec in tests. TODO remove explicit config when default changes
+        verify(new HiveConfig().getHiveCompressionCodec() == HiveCompressionOption.GZIP);
+        String hiveCompressionCodec = HiveCompressionCodec.ZSTD.name();
+
         DistributedQueryRunner queryRunner = HiveQueryRunner.builder()
                 .setExtraProperties(extraProperties)
                 .setAdditionalSetup(additionalSetup)
                 .setHiveProperties(ImmutableMap.of(
+                        "hive.compression-codec", hiveCompressionCodec,
                         "hive.allow-register-partition-procedure", "true",
                         // Reduce writer sort buffer size to ensure SortingFileWriter gets used
                         "hive.writer-sort-buffer-size", "1MB",
diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java
index ea02b4db2e3a..3a12ac6d7090 100644
--- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java
+++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java
@@ -248,8 +248,10 @@ public DistributedQueryRunner build()
                     .put("hive.max-initial-split-size", "10kB") // so that each bucket has multiple splits
                     .put("hive.max-split-size", "10kB") // so that each bucket has multiple splits
                     .put("hive.storage-format", "TEXTFILE") // so that there's no minimum split size for the file
-                    .put("hive.compression-codec", "NONE") // so that the file is splittable
                     .buildOrThrow();
+            hiveBucketedProperties = new HashMap<>(hiveBucketedProperties);
+            hiveBucketedProperties.put("hive.compression-codec", "NONE"); // so that the file is splittable
+
             queryRunner.createCatalog(HIVE_CATALOG, "hive", hiveProperties);
             queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, "hive", hiveBucketedProperties);