diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedTableMetadata.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedTableMetadata.java
index cac1e1bbe81e2..51860ac9eeffc 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedTableMetadata.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedTableMetadata.java
@@ -18,6 +18,7 @@
 
 package org.apache.hudi.client.functional;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hudi.common.model.HoodieTableType;
 import org.apache.hudi.common.table.view.TableFileSystemView;
 import org.apache.hudi.common.testutils.HoodieTestTable;
@@ -109,4 +110,17 @@ public void testMetadataTableKeyGenerator(final HoodieTableType tableType) throw
         tableMetadata.getMetadataMetaClient().getTableConfig().getKeyGeneratorClassName());
   }
 
+  /**
+   * [HUDI-2852] Table metadata returns empty for a non-existent partition.
+   */
+  @ParameterizedTest
+  @EnumSource(HoodieTableType.class)
+  public void testNotExistPartition(final HoodieTableType tableType) throws Exception {
+    init(tableType);
+    HoodieBackedTableMetadata tableMetadata = new HoodieBackedTableMetadata(context,
+        writeConfig.getMetadataConfig(), writeConfig.getBasePath(), writeConfig.getSpillableMapBasePath(), false);
+    FileStatus[] allFilesInPartition =
+        tableMetadata.getAllFilesInPartition(new Path(writeConfig.getBasePath(), "dummy"));
+    assertEquals(0, allFilesInPartition.length);
+  }
 }
diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
index 766bc68ec1151..8a9f8552c15b1 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java
@@ -121,7 +121,8 @@ private void initIfNeeded() {
 
   @Override
   protected Option<HoodieRecord<HoodieMetadataPayload>> getRecordByKey(String key, String partitionName) {
-    return getRecordsByKeys(Collections.singletonList(key), partitionName).get(0).getValue();
+    List<Pair<String, Option<HoodieRecord<HoodieMetadataPayload>>>> recordsByKeys = getRecordsByKeys(Collections.singletonList(key), partitionName);
+    return recordsByKeys.isEmpty() ? Option.empty() : recordsByKeys.get(0).getValue();
   }
 
   protected List<Pair<String, Option<HoodieRecord<HoodieMetadataPayload>>>> getRecordsByKeys(List<String> keys, String partitionName) {
@@ -131,6 +132,10 @@ protected List<Pair<String, Option<HoodieRecord<HoodieMetadataPayload>>>> getRec
     HoodieFileReader baseFileReader = readers.getKey();
     HoodieMetadataMergedLogRecordReader logRecordScanner = readers.getRight();
 
+    if (baseFileReader == null && logRecordScanner == null) {
+      return Collections.emptyList();
+    }
+
     // local map to assist in merging with base file records
     Map<String, Option<HoodieRecord<HoodieMetadataPayload>>> logRecords = readLogRecords(logRecordScanner, keys, timings);
     List<Pair<String, Option<HoodieRecord<HoodieMetadataPayload>>>> result = readFromBaseAndMergeWithLogRecords(
@@ -241,6 +246,10 @@ private Pair<HoodieFileReader, HoodieMetadataMergedLogRecordReader> openReadersI
     // Metadata is in sync till the latest completed instant on the dataset
     HoodieTimer timer = new HoodieTimer().startTimer();
     List<FileSlice> latestFileSlices = HoodieTableMetadataUtil.loadPartitionFileGroupsWithLatestFileSlices(metadataMetaClient, partitionName);
+    if (latestFileSlices.isEmpty()) {
+      // empty partition
+      return Pair.of(null, null);
+    }
     ValidationUtils.checkArgument(latestFileSlices.size() == 1, String.format("Invalid number of file slices: found=%d, required=%d", latestFileSlices.size(), 1));
     final FileSlice slice = latestFileSlices.get(HoodieTableMetadataUtil.mapRecordKeyToFileGroupIndex(key, latestFileSlices.size()));