diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnMergeOnReadStorage.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnMergeOnReadStorage.java
index be979c892f321..72f2688fb5062 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnMergeOnReadStorage.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnMergeOnReadStorage.java
@@ -234,6 +234,41 @@ public void testLogCompactionOnMORTableWithoutBaseFile() throws Exception {
         hadoopConf, Arrays.asList(HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH));
   }
 
+  /**
+   * Test scheduling log-compaction twice. It should only generate one plan for log compaction in the timeline.
+   */
+  @Test
+  public void testSchedulingLogCompactionTwice() throws Exception {
+    HoodieCompactionConfig compactionConfig = HoodieCompactionConfig.newBuilder()
+        .withMaxNumDeltaCommitsBeforeCompaction(1)
+        .withLogCompactionBlocksThreshold(1)
+        .build();
+    HoodieWriteConfig config = getConfigBuilder(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA,
+        HoodieIndex.IndexType.INMEMORY).withAutoCommit(true).withCompactionConfig(compactionConfig).build();
+    SparkRDDWriteClient client = getHoodieWriteClient(config);
+
+    // First insert
+    String newCommitTime = HoodieActiveTimeline.createNewInstantTime();
+    insertBatch(config, client, newCommitTime, "000", 100,
+        SparkRDDWriteClient::insert, false, false, 100, 100,
+        1, Option.empty());
+
+    String prevCommitTime = newCommitTime;
+    // Upsert
+    newCommitTime = HoodieActiveTimeline.createNewInstantTime();
+    updateBatch(config, client, newCommitTime, prevCommitTime,
+        Option.of(Arrays.asList(prevCommitTime)), "000", 50, SparkRDDWriteClient::upsert,
+        false, false, 50, 100, 2, config.populateMetaFields());
+
+    // Schedule log compaction
+    Option<String> logCompactionTimeStamp = client.scheduleLogCompaction(Option.empty());
+    assertTrue(logCompactionTimeStamp.isPresent());
+
+    // Try scheduling log compaction again; it should not succeed since a plan already exists.
+    Option<String> logCompactionTimeStamp2 = client.scheduleLogCompaction(Option.empty());
+    assertFalse(logCompactionTimeStamp2.isPresent());
+  }
+
   /**
    * Test scheduling log-compaction right after scheduling compaction. This should fail.
    */
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java
index a5d56c91d5e3e..86a37bc2d4e8f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java
@@ -119,7 +119,8 @@ public HoodieTimeline filterCompletedInstants() {
   @Override
   public HoodieTimeline filterCompletedAndCompactionInstants() {
     return new HoodieDefaultTimeline(getInstantsAsStream().filter(s -> s.isCompleted()
-        || s.getAction().equals(HoodieTimeline.COMPACTION_ACTION)), details);
+        || s.getAction().equals(HoodieTimeline.COMPACTION_ACTION)
+        || s.getAction().equals(HoodieTimeline.LOG_COMPACTION_ACTION)), details);
   }
 
   @Override