diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/SingleSparkJobExecutionStrategy.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/SingleSparkJobExecutionStrategy.java
index 1158d0ada42f0..bb6d3df5f105e 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/SingleSparkJobExecutionStrategy.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/SingleSparkJobExecutionStrategy.java
@@ -136,7 +136,7 @@ private Stream<WriteStatus> runClusteringForGroup(ClusteringGroupInfo clustering
   /**
    * Execute clustering to write inputRecords into new files as defined by rules in strategy parameters.
    * The number of new file groups created is bounded by numOutputGroups.
-   * Note that commit is not done as part of strategy. commit is callers responsibility.
+   * Note that commit is not done as part of the strategy. Commit is the caller's responsibility.
    */
   public abstract Iterator<List<WriteStatus>> performClusteringWithRecordsIterator(final Iterator<HoodieRecord<T>> records, final int numOutputGroups, final String instantTime,
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkInternalSchemaConverter.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkInternalSchemaConverter.java
index 8e086c2927e42..098870a60a526 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkInternalSchemaConverter.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkInternalSchemaConverter.java
@@ -81,7 +81,7 @@ private SparkInternalSchemaConverter() {
   public static final String HOODIE_VALID_COMMITS_LIST = "hoodie.valid.commits.list";
 
   /**
-   * Converts a spark schema to an hudi internal schema. Fields without IDs are kept and assigned fallback IDs.
+   * Convert a spark schema to a Hudi internal schema. Fields without IDs are kept and assigned fallback IDs.
    *
    * @param sparkSchema a spark schema
    * @return a matching internal schema for the provided spark schema
@@ -157,7 +157,7 @@ public static Type buildTypeFromStructType(DataType sparkType, Boolean firstVisi
   }
 
   /**
-   * Converts Spark schema to Hudi internal schema, and prune fields.
+   * Convert a Spark schema to a Hudi internal schema, and prune fields.
    * Fields without IDs are kept and assigned fallback IDs.
    *
    * @param sparkSchema a pruned spark schema
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkValidatorUtils.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkValidatorUtils.java
index fd083f2c89a46..a6d03eae2b361 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkValidatorUtils.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkValidatorUtils.java
@@ -50,7 +50,7 @@ import scala.collection.JavaConverters;
 
 /**
- * Spark validator utils to verify and run any precommit validators configured.
+ * Spark validator utils to verify and run any configured pre-commit validators.
  */
 public class SparkValidatorUtils {
   private static final Logger LOG = LogManager.getLogger(BaseSparkCommitActionExecutor.class);
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieAvroDataBlock.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieAvroDataBlock.java
index 491c6700c9067..9e74d14c048f2 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieAvroDataBlock.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieAvroDataBlock.java
@@ -308,7 +308,7 @@ public byte[] getBytes(Schema schema) throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     DataOutputStream output = new DataOutputStream(baos);
 
-    // 2. Compress and Write schema out
+    // 1. Compress and write schema out
     byte[] schemaContent = compress(schema.toString());
     output.writeInt(schemaContent.length);
     output.write(schemaContent);
@@ -318,10 +318,10 @@ public byte[] getBytes(Schema schema) throws IOException {
       recordItr.forEachRemaining(records::add);
     }
 
-    // 3. Write total number of records
+    // 2. Write total number of records
     output.writeInt(records.size());
 
-    // 4. Write the records
+    // 3. Write the records
     Iterator<IndexedRecord> itr = records.iterator();
     while (itr.hasNext()) {
       IndexedRecord s = itr.next();
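
For context on the last hunk: the renumbered comments in HoodieAvroDataBlock.getBytes() describe the block's byte layout: a length-prefixed compressed schema string, then a record count, then the records themselves. Below is a minimal standalone sketch of that layout, not Hudi's actual encoding; the DEFLATE-based compress() helper and the pre-encoded raw record bytes are illustrative assumptions (the real block has its own schema compression scheme and Avro-encodes each IndexedRecord).

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.zip.DeflaterOutputStream;

// Sketch of the block layout written by getBytes(), following the
// renumbered steps 1-3 in the diff above.
public class DataBlockLayoutSketch {

  // Assumption: plain DEFLATE stands in for Hudi's schema compression.
  static byte[] compress(String text) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (DeflaterOutputStream out = new DeflaterOutputStream(baos)) {
      out.write(text.getBytes(StandardCharsets.UTF_8));
    }
    return baos.toByteArray();
  }

  static byte[] serialize(String schemaJson, List<byte[]> encodedRecords) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream output = new DataOutputStream(baos);

    // 1. Compress and write schema out, length-prefixed
    byte[] schemaContent = compress(schemaJson);
    output.writeInt(schemaContent.length);
    output.write(schemaContent);

    // 2. Write total number of records
    output.writeInt(encodedRecords.size());

    // 3. Write the records (assumed already encoded upstream)
    for (byte[] record : encodedRecords) {
      output.write(record);
    }

    output.flush();
    return baos.toByteArray();
  }
}

Writing the sizes before the payloads mirrors how a reader must consume the stream: read the schema length, slice off that many schema bytes, read the record count, then decode exactly that many records.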