@@ -73,9 +73,13 @@ public static String getPartitionPathFromGenericRecord(GenericRecord genericReco
   */
  public static String[] extractRecordKeys(String recordKey) {
    String[] fieldKV = recordKey.split(",");

    return Arrays.stream(fieldKV).map(kv -> {
      final String[] kvArray = kv.split(":");
Contributor:
So, why does a simple key use the key form name:val instead of just val?

Contributor Author:
> So, why does a simple key use the key form name:val instead of just val?

options.put(FlinkOptions.RECORD_KEY_FIELD.key(), "uuid");
options.put(FlinkOptions.PARTITION_PATH_FIELD.key(), "partition,name");
options.put(FlinkOptions.KEYGEN_TYPE.key(), KeyGeneratorType.COMPLEX.name());
If pk is "uuid" and partition is "partition,name", flink-sql will use the COMPLEX KeyGenerator, so uuid will be stored as "uuid:danny"; I've tested it.
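To make the difference concrete, here is a minimal sketch of the two encodings discussed in this thread, using the uuid/danny example from above; it is illustrative only and not the actual Hudi key generator implementation:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class KeyEncodingSketch {

  // SIMPLE key generator: the record key is just the raw field value, e.g. "danny".
  static String simpleKey(String value) {
    return value;
  }

  // COMPLEX key generator: each field is written as "name:value" and the pairs are
  // comma-separated, so even a single pk "uuid" ends up stored as "uuid:danny".
  static String complexKey(Map<String, String> fields) {
    return fields.entrySet().stream()
        .map(e -> e.getKey() + ":" + e.getValue())
        .collect(Collectors.joining(","));
  }

  public static void main(String[] args) {
    Map<String, String> recordKeyFields = new LinkedHashMap<>();
    recordKeyFields.put("uuid", "danny");

    System.out.println(simpleKey("danny"));          // danny
    System.out.println(complexKey(recordKeyFields)); // uuid:danny
  }
}
```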

Contributor Author:
@danny0405 I've explained it, looking forward to your reply.

Contributor:
So why do we configure a COMPLEX key generator while the key is just simple here?

Contributor Author:
> So why do we configure a COMPLEX key generator while the key is just simple here?
Because flink-sql's default logic picks the COMPLEX KeyGenerator whenever boolean complexHoodieKey = pks.length > 1 || partitions.length > 1; is true:
https://github.com/apache/hudi/blob/master/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableFactory.java#L239
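A small sketch of that default selection rule, built around the condition quoted above; the class and method names here are made up for illustration, the real logic lives in HoodieTableFactory:

```java
public class KeyGenChoiceSketch {

  // Mirrors the quoted condition: COMPLEX is chosen as soon as either the primary key
  // or the partition path consists of more than one field.
  static String chooseKeyGenType(String recordKeyFields, String partitionPathFields) {
    String[] pks = recordKeyFields.split(",");
    String[] partitions = partitionPathFields.split(",");
    boolean complexHoodieKey = pks.length > 1 || partitions.length > 1;
    return complexHoodieKey ? "COMPLEX" : "SIMPLE";
  }

  public static void main(String[] args) {
    // Single pk but two partition fields -> COMPLEX, matching the example above.
    System.out.println(chooseKeyGenType("uuid", "partition,name")); // COMPLEX
    // Single pk and single partition field -> SIMPLE.
    System.out.println(chooseKeyGenType("uuid", "partition"));      // SIMPLE
  }
}
```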

Contributor:
Yeah, I have merged #6539, so this PR can be closed.

Contributor Author:
> Yeah, I have merged #6539, so this PR can be closed.

@danny0405 #6539 has a small problem: with a single pk and the simple key generator we store 'danny', not 'id:danny', so the kvArray[1] access will fail.
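A minimal sketch of the failure mode described here, assuming a simple key stored as "danny" and a complex key stored as "id:danny":

```java
public class SimpleKeySplitSketch {

  public static void main(String[] args) {
    String simpleRecordKey = "danny";     // what a SIMPLE key generator stores
    String complexRecordKey = "id:danny"; // what a COMPLEX key generator stores

    String[] simpleKv = simpleRecordKey.split(":");
    String[] complexKv = complexRecordKey.split(":");

    System.out.println(simpleKv.length);  // 1 -> reading simpleKv[1] throws ArrayIndexOutOfBoundsException
    System.out.println(complexKv[1]);     // danny -> the pre-fix code only works for this shape

    // The guard added in this PR checks kvArray.length == 1 first and returns kvArray[0],
    // so simple keys never reach the kvArray[1] access.
  }
}
```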

Contributor:
Thanks, we can rebase the PR and fix it.

Contributor Author (@flashJd, Sep 5, 2022):
@danny0405, I've rebased this PR onto master to fix the issue.
By the way, can you review #6429? Thanks.

Contributor:
I have fixed it in master, so this PR can be closed; I will review #6429 then.

-      if (kvArray[1].equals(NULL_RECORDKEY_PLACEHOLDER)) {
+      // a simple key
+      if (kvArray.length == 1) {
+        return kvArray[0];
+      } else if (kvArray[1].equals(NULL_RECORDKEY_PLACEHOLDER)) {
         return null;
       } else if (kvArray[1].equals(EMPTY_RECORDKEY_PLACEHOLDER)) {
         return "";
@@ -25,6 +25,9 @@ public class TestKeyGenUtils {

  @Test
  public void testExtractRecordKeys() {
    String[] s0 = KeyGenUtils.extractRecordKeys("1");
    Assertions.assertArrayEquals(new String[]{"1"}, s0);

    String[] s1 = KeyGenUtils.extractRecordKeys("id:1");
    Assertions.assertArrayEquals(new String[]{"1"}, s1);
@@ -26,6 +26,7 @@
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.configuration.HadoopConfigurations;
import org.apache.hudi.keygen.constant.KeyGeneratorType;
import org.apache.hudi.table.HoodieTableSource;
import org.apache.hudi.table.format.cow.CopyOnWriteInputFormat;
import org.apache.hudi.table.format.mor.MergeOnReadInputFormat;
@@ -48,6 +49,7 @@
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -290,6 +292,60 @@ void testReadWithDeletesMOR() throws Exception {
    assertThat(actual, is(expected));
  }

  @Test
  void testReadWithDeletesMORWithSimpleKeyGenWithChangeLogDisabled() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put(FlinkOptions.CHANGELOG_ENABLED.key(), "false");
    options.put(FlinkOptions.PARTITION_PATH_FIELD.key(), "partition");
    options.put(FlinkOptions.KEYGEN_TYPE.key(), KeyGeneratorType.SIMPLE.name());
    beforeEach(HoodieTableType.MERGE_ON_READ, options);

    // write another commit to read again
    TestData.writeData(TestData.DATA_SET_UPDATE_DELETE, conf);

    InputFormat<RowData, ?> inputFormat = this.tableSource.getInputFormat();
    assertThat(inputFormat, instanceOf(MergeOnReadInputFormat.class));
    ((MergeOnReadInputFormat) inputFormat).isEmitDelete(true);

    List<RowData> result = readData(inputFormat);

    final String actual = TestData.rowDataToString(result);
    final String expected = "["
        + "+I[id1, Danny, 24, 1970-01-01T00:00:00.001, par1], "
        + "+I[id2, Stephen, 34, 1970-01-01T00:00:00.002, par1], "
        + "-D[id3, null, null, null, null], "
        + "-D[id5, null, null, null, null], "
        + "-D[id9, null, null, null, null]]";
    assertThat(actual, is(expected));
  }

  @Test
  void testReadWithDeletesMORWithComplexKeyGenWithChangeLogDisabled() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put(FlinkOptions.CHANGELOG_ENABLED.key(), "false");
    options.put(FlinkOptions.PARTITION_PATH_FIELD.key(), "partition,name");
    options.put(FlinkOptions.KEYGEN_TYPE.key(), KeyGeneratorType.COMPLEX.name());
    beforeEach(HoodieTableType.MERGE_ON_READ, options);

    // write another commit to read again
    TestData.writeData(TestData.DATA_SET_UPDATE_DELETE, conf);

    InputFormat<RowData, ?> inputFormat = this.tableSource.getInputFormat();
    assertThat(inputFormat, instanceOf(MergeOnReadInputFormat.class));
    ((MergeOnReadInputFormat) inputFormat).isEmitDelete(true);

    List<RowData> result = readData(inputFormat);

    final String actual = TestData.rowDataToString(result);
    final String expected = "["
        + "+I[id1, Danny, 24, 1970-01-01T00:00:00.001, par1], "
        + "+I[id2, Stephen, 34, 1970-01-01T00:00:00.002, par1], "
        + "-D[id3, null, null, null, null], "
        + "-D[id5, null, null, null, null], "
        + "-D[id9, null, null, null, null]]";
    assertThat(actual, is(expected));
  }

  @Test
  void testReadWithDeletesCOW() throws Exception {
    beforeEach(HoodieTableType.COPY_ON_WRITE);
@@ -626,7 +682,7 @@ private HoodieTableSource getTableSource(Configuration conf) {
     return new HoodieTableSource(
         TestConfigurations.TABLE_SCHEMA,
         new Path(tempFile.getAbsolutePath()),
-        Collections.singletonList("partition"),
+        Arrays.asList(conf.getString(FlinkOptions.PARTITION_PATH_FIELD).split(",")),
         "default",
         conf);
   }