|
18 | 18 |
|
19 | 19 | package org.apache.hudi.internal; |
20 | 20 |
|
21 | | -import org.apache.hudi.common.model.HoodieRecord; |
22 | 21 | import org.apache.hudi.common.testutils.HoodieTestDataGenerator; |
23 | 22 | import org.apache.hudi.common.util.Option; |
24 | 23 | import org.apache.hudi.config.HoodieWriteConfig; |
25 | 24 | import org.apache.hudi.table.HoodieSparkTable; |
26 | 25 | import org.apache.hudi.table.HoodieTable; |
27 | | -import org.apache.hudi.testutils.SparkDatasetTestUtils; |
28 | | - |
29 | 26 | import org.apache.spark.sql.Dataset; |
30 | 27 | import org.apache.spark.sql.Row; |
31 | 28 | import org.apache.spark.sql.catalyst.InternalRow; |
32 | | -import org.junit.jupiter.api.Assertions; |
33 | | -import org.junit.jupiter.api.Disabled; |
34 | 29 | import org.junit.jupiter.api.Test; |
35 | 30 | import org.junit.jupiter.params.ParameterizedTest; |
36 | 31 | import org.junit.jupiter.params.provider.Arguments; |
|
50 | 45 | /** |
51 | 46 | * Unit tests {@link HoodieBulkInsertDataInternalWriter}. |
52 | 47 | */ |
53 | | -@Disabled("temp") |
54 | 48 | public class TestHoodieBulkInsertDataInternalWriter extends |
55 | 49 | HoodieBulkInsertInternalWriterTestBase { |
56 | 50 |
|
@@ -106,7 +100,7 @@ public void testDataInternalWriter(boolean sorted, boolean populateMetaFields) t |
106 | 100 | Option<List<String>> fileNames = Option.of(new ArrayList<>()); |
107 | 101 |
|
108 | 102 | // verify write statuses |
109 | | - assertWriteStatuses(commitMetadata.getWriteStatuses(), batches, size, sorted, fileAbsPaths, fileNames); |
| 103 | + assertWriteStatuses(commitMetadata.getWriteStatuses(), batches, size, sorted, fileAbsPaths, fileNames, false); |
110 | 104 |
|
111 | 105 | // verify rows |
112 | 106 | Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.get().toArray(new String[0])); |
@@ -148,14 +142,11 @@ public void testDataInternalWriterHiveStylePartitioning() throws Exception { |
148 | 142 | Option<List<String>> fileNames = Option.of(new ArrayList<>()); |
149 | 143 |
|
150 | 144 | // verify write statuses |
151 | | - assertWriteStatuses(commitMetadata.getWriteStatuses(), batches, size, sorted, fileAbsPaths, fileNames); |
| 145 | + assertWriteStatuses(commitMetadata.getWriteStatuses(), batches, size, sorted, fileAbsPaths, fileNames, true); |
152 | 146 |
|
153 | 147 | // verify rows |
154 | 148 | Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.get().toArray(new String[0])); |
155 | 149 | assertOutput(totalInputRows, result, instantTime, fileNames, populateMetaFields); |
156 | | - |
157 | | - result.collectAsList().forEach(entry -> Assertions.assertTrue(entry.getAs(HoodieRecord.PARTITION_PATH_METADATA_FIELD).toString() |
158 | | - .contains(SparkDatasetTestUtils.PARTITION_PATH_FIELD_NAME + "="))); |
159 | 150 | } |
160 | 151 | } |
161 | 152 |
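Note: the new trailing boolean passed to assertWriteStatuses appears to take over the hive-style partition-path check that this hunk removes from the test body (it is true only in the hive-style partitioning test and false at the other call sites). A minimal sketch of how a shared helper could honor such a flag is shown below; the class and helper names, the parameter name isHiveStylePartitioning, and the idea of iterating plain partition-path strings are assumptions for illustration, not the actual signature introduced by this change. Only the "<partition field>=" containment check mirrors the assertion removed above.

    import java.util.List;

    import org.apache.hudi.testutils.SparkDatasetTestUtils;
    import org.junit.jupiter.api.Assertions;

    final class PartitionStyleAssertions {
      // Hypothetical fragment: skip the check for non hive-style runs (the `false` call sites),
      // otherwise require every partition path to use the "<field>=<value>" hive layout.
      static void verifyPartitionStyle(List<String> partitionPaths, boolean isHiveStylePartitioning) {
        if (!isHiveStylePartitioning) {
          return;
        }
        for (String partitionPath : partitionPaths) {
          Assertions.assertTrue(
              partitionPath.contains(SparkDatasetTestUtils.PARTITION_PATH_FIELD_NAME + "="),
              "expected hive-style partition path but got: " + partitionPath);
        }
      }
    }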
|
@@ -204,7 +195,7 @@ public void testGlobalFailure() throws Exception { |
204 | 195 | Option<List<String>> fileAbsPaths = Option.of(new ArrayList<>()); |
205 | 196 | Option<List<String>> fileNames = Option.of(new ArrayList<>()); |
206 | 197 | // verify write statuses |
207 | | - assertWriteStatuses(commitMetadata.getWriteStatuses(), 1, size / 2, false, fileAbsPaths, fileNames); |
| 198 | + assertWriteStatuses(commitMetadata.getWriteStatuses(), 1, size / 2, false, fileAbsPaths, fileNames, false); |
208 | 199 |
|
209 | 200 | // verify rows |
210 | 201 | Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.get().toArray(new String[0])); |
|