diff --git a/hudi-client/hudi-client-common/src/test/java/org/apache/hudi/testutils/HoodieWriteableTestTable.java b/hudi-client/hudi-client-common/src/test/java/org/apache/hudi/testutils/HoodieWriteableTestTable.java
index e8fda35b36e1b..32fd200145e9b 100644
--- a/hudi-client/hudi-client-common/src/test/java/org/apache/hudi/testutils/HoodieWriteableTestTable.java
+++ b/hudi-client/hudi-client-common/src/test/java/org/apache/hudi/testutils/HoodieWriteableTestTable.java
@@ -100,6 +100,12 @@ public HoodieWriteableTestTable withInserts(String partition, String fileId, Lis
     FileCreateUtils.createPartitionMetaFile(basePath, partition);
     String fileName = baseFileName(currentInstantTime, fileId);
 
+    Path baseFilePath = new Path(Paths.get(basePath, partition, fileName).toString());
+    if (this.fs.exists(baseFilePath)) {
+      LOG.warn("Deleting the existing base file " + baseFilePath);
+      this.fs.delete(baseFilePath, true);
+    }
+
     if (HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().equals(HoodieFileFormat.PARQUET)) {
       HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(
           new AvroSchemaConverter().convert(schema), schema, Option.of(filter));
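For reference, the guard added to withInserts() above is the standard Hadoop FileSystem exists/delete pattern. A minimal standalone sketch follows; the class name, the local basePath/partition/fileName values, and the use of a local FileSystem via new Configuration() are illustrative assumptions, not part of the change.

import java.nio.file.Paths;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical standalone illustration of the delete-if-exists guard above.
public class BaseFileOverwriteGuardSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative stand-ins for the test table's basePath/partition/fileName fields.
    String basePath = "/tmp/hoodie-test-table";
    String partition = "2016/01/31";
    String fileName = "some-file-id_1-0-1_000.parquet";

    FileSystem fs = FileSystem.get(new Configuration());

    // If a base file from an earlier write already sits at the target path,
    // drop it so the new write starts clean instead of colliding with it.
    // The 'true' flag requests a recursive delete, as in the diff above.
    Path baseFilePath = new Path(Paths.get(basePath, partition, fileName).toString());
    if (fs.exists(baseFilePath)) {
      System.out.println("Deleting the existing base file " + baseFilePath);
      fs.delete(baseFilePath, true);
    }
  }
}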
diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieIndex.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieIndex.java
index fa5a3537bb1f8..01a3d967d7486 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieIndex.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieIndex.java
@@ -25,6 +25,7 @@
 import org.apache.hudi.common.model.HoodieKey;
 import org.apache.hudi.common.model.HoodieRecord;
 import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.model.WriteOperationType;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.view.FileSystemViewStorageConfig;
 import org.apache.hudi.common.table.view.FileSystemViewStorageType;
@@ -39,6 +40,8 @@
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.index.HoodieIndex.IndexType;
+import org.apache.hudi.metadata.HoodieTableMetadataWriter;
+import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;
 import org.apache.hudi.table.HoodieSparkTable;
 import org.apache.hudi.table.HoodieTable;
 import org.apache.hudi.testutils.Assertions;
@@ -374,11 +377,18 @@ public void testSimpleGlobalIndexTagLocationWhenShouldUpdatePartitionPath() thro
             .withGlobalSimpleIndexUpdatePartitionPath(true)
             .withBloomIndexUpdatePartitionPath(true)
             .build())
-        .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build()).build();
+        .withMetadataConfig(
+            HoodieMetadataConfig.newBuilder().enable(true).build())
+        .build();
     writeClient = getHoodieWriteClient(config);
     index = writeClient.getIndex();
+
     HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
-    HoodieSparkWriteableTestTable testTable = HoodieSparkWriteableTestTable.of(hoodieTable, SCHEMA);
+    HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(
+        writeClient.getEngineContext().getHadoopConf().get(), config, writeClient.getEngineContext());
+    HoodieSparkWriteableTestTable testTable = HoodieSparkWriteableTestTable.of(hoodieTable.getMetaClient(),
+        SCHEMA, metadataWriter);
+
     final String p1 = "2016/01/31";
     final String p2 = "2016/02/28";
 
@@ -415,8 +425,14 @@ public void testSimpleGlobalIndexTagLocationWhenShouldUpdatePartitionPath() thro
             new HoodieKey(incomingPayloadSamePartition.getRowKey(), incomingPayloadSamePartition.getPartitionPath()),
             incomingPayloadSamePartition);
 
+    final String file1P1C0 = UUID.randomUUID().toString();
+    Map<String, List<Pair<String, Integer>>> c1PartitionToFilesNameLengthMap = new HashMap<>();
+    c1PartitionToFilesNameLengthMap.put(p1, Collections.singletonList(Pair.of(file1P1C0, 100)));
+    testTable.doWriteOperation("1000", WriteOperationType.INSERT, Arrays.asList(p1),
+        c1PartitionToFilesNameLengthMap, false, false);
+
     // We have some records to be tagged (two different partitions)
-    testTable.addCommit("1000").getFileIdWithInserts(p1, originalRecord);
+    testTable.withInserts(p1, file1P1C0, originalRecord);
 
     // test against incoming record with a different partition
     JavaRDD<HoodieRecord> recordRDD = jsc.parallelize(Collections.singletonList(incomingRecord));
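Taken together, the TestHoodieIndex changes enable the metadata table in the write config and seed the test data through a metadata-aware test table. The sketch below condenses that setup into a single hypothetical helper: the class and method names are mine, the fixtures (writeClient, config, metaClient, schema, record) are assumed to be supplied by the test class, the import paths are a best guess at the Hudi classes involved, and the commit time "1000" and the 100-byte file length mirror the values used in the diff.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import org.apache.avro.Schema;
import org.apache.hudi.client.SparkRDDWriteClient;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.WriteOperationType;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.metadata.HoodieTableMetadataWriter;
import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;
import org.apache.hudi.testutils.HoodieSparkWriteableTestTable;

// Hypothetical helper condensing the test setup introduced in the diff above.
public class MetadataAwareTestTableSetupSketch {

  static HoodieSparkWriteableTestTable seedOneBaseFile(SparkRDDWriteClient writeClient,
                                                       HoodieWriteConfig config,
                                                       HoodieTableMetaClient metaClient,
                                                       Schema schema,
                                                       String partition,
                                                       HoodieRecord record) throws Exception {
    // With the metadata table enabled in the write config, the test table needs a
    // metadata writer so the files it creates are also reflected in the metadata table.
    HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(
        writeClient.getEngineContext().getHadoopConf().get(), config, writeClient.getEngineContext());
    HoodieSparkWriteableTestTable testTable =
        HoodieSparkWriteableTestTable.of(metaClient, schema, metadataWriter);

    // Register a commit ("1000") that owns one 100-byte base file in the partition,
    // mirroring the doWriteOperation(...) call added in the diff ...
    String fileId = UUID.randomUUID().toString();
    Map<String, List<Pair<String, Integer>>> partitionToFilesNameLengthMap = new HashMap<>();
    partitionToFilesNameLengthMap.put(partition, Collections.singletonList(Pair.of(fileId, 100)));
    testTable.doWriteOperation("1000", WriteOperationType.INSERT, Arrays.asList(partition),
        partitionToFilesNameLengthMap, false, false);

    // ... then write the record into that file id, as withInserts(p1, file1P1C0, originalRecord) does.
    testTable.withInserts(partition, fileId, record);
    return testTable;
  }
}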