diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java index cfdedb9c4021b..7802f121940d2 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java @@ -67,7 +67,7 @@ @Component public class CompactionCommand implements CommandMarker { - private static Logger log = LogManager.getLogger(CompactionCommand.class); + private static final Logger LOG = LogManager.getLogger(CompactionCommand.class); private static final String TMP_DIR = "/tmp/"; @@ -249,7 +249,7 @@ private T deSerializeOperationResult(String inputP, FileSystem fs) throws Ex ObjectInputStream in = new ObjectInputStream(fsDataInputStream); try { T result = (T) in.readObject(); - log.info("Result : " + result); + LOG.info("Result : " + result); return result; } finally { in.close(); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java index 7209789a6d7a0..e892ca939b10b 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/HDFSParquetImportCommand.java @@ -41,7 +41,7 @@ @Component public class HDFSParquetImportCommand implements CommandMarker { - private static Logger log = LogManager.getLogger(HDFSParquetImportCommand.class); + private static final Logger LOG = LogManager.getLogger(HDFSParquetImportCommand.class); @CliCommand(value = "hdfsparquetimport", help = "Imports Parquet dataset to a hoodie dataset") public String convert( diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java index 058e2c57360b3..5d08921184650 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java @@ -41,7 +41,7 @@ */ public class SparkMain { - protected static final Logger LOG = Logger.getLogger(SparkMain.class); + private static final Logger LOG = Logger.getLogger(SparkMain.class); /** * Commands. 
diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java index 625793c72ada1..73aa45c500d09 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java @@ -29,7 +29,7 @@ */ public class InputStreamConsumer extends Thread { - protected static final Logger LOG = Logger.getLogger(InputStreamConsumer.class.getName()); + private static final Logger LOG = Logger.getLogger(InputStreamConsumer.class.getName()); private InputStream is; public InputStreamConsumer(InputStream is) { diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkUtil.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkUtil.java index 7b957ee470306..1625c67a89306 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkUtil.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkUtil.java @@ -36,7 +36,7 @@ */ public class SparkUtil { - public static Logger logger = Logger.getLogger(SparkUtil.class); + private static final Logger LOG = Logger.getLogger(SparkUtil.class); public static final String DEFUALT_SPARK_MASTER = "yarn-client"; /** diff --git a/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java b/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java index 76b66319ef5e5..968bc69e5a824 100644 --- a/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java +++ b/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java @@ -65,7 +65,7 @@ */ public class CompactionAdminClient extends AbstractHoodieClient { - private static Logger log = LogManager.getLogger(CompactionAdminClient.class); + private static final Logger LOG = LogManager.getLogger(CompactionAdminClient.class); public CompactionAdminClient(JavaSparkContext jsc, String basePath) { super(jsc, HoodieWriteConfig.newBuilder().withPath(basePath).build()); @@ -350,25 +350,25 @@ private ValidationOpResult validateCompactionOperation(HoodieTableMetaClient met private List runRenamingOps(HoodieTableMetaClient metaClient, List> renameActions, int parallelism, boolean dryRun) { if (renameActions.isEmpty()) { - log.info("No renaming of log-files needed. Proceeding to removing file-id from compaction-plan"); + LOG.info("No renaming of log-files needed. Proceeding to removing file-id from compaction-plan"); return new ArrayList<>(); } else { - log.info("The following compaction renaming operations needs to be performed to un-schedule"); + LOG.info("The following compaction renaming operations needs to be performed to un-schedule"); if (!dryRun) { return jsc.parallelize(renameActions, parallelism).map(lfPair -> { try { - log.info("RENAME " + lfPair.getLeft().getPath() + " => " + lfPair.getRight().getPath()); + LOG.info("RENAME " + lfPair.getLeft().getPath() + " => " + lfPair.getRight().getPath()); renameLogFile(metaClient, lfPair.getLeft(), lfPair.getRight()); return new RenameOpResult(lfPair, true, Option.empty()); } catch (IOException e) { - log.error("Error renaming log file", e); - log.error("\n\n\n***NOTE Compaction is in inconsistent state. Try running \"compaction repair " + LOG.error("Error renaming log file", e); + LOG.error("\n\n\n***NOTE Compaction is in inconsistent state. 
Try running \"compaction repair " + lfPair.getLeft().getBaseCommitTime() + "\" to recover from failure ***\n\n\n"); return new RenameOpResult(lfPair, false, Option.of(e)); } }).collect(); } else { - log.info("Dry-Run Mode activated for rename operations"); + LOG.info("Dry-Run Mode activated for rename operations"); return renameActions.parallelStream().map(lfPair -> new RenameOpResult(lfPair, false, false, Option.empty())) .collect(Collectors.toList()); } @@ -393,7 +393,7 @@ protected List> getRenamingActionsForUnschedu : new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); HoodieCompactionPlan plan = getCompactionPlan(metaClient, compactionInstant); if (plan.getOperations() != null) { - log.info( + LOG.info( "Number of Compaction Operations :" + plan.getOperations().size() + " for instant :" + compactionInstant); List ops = plan.getOperations().stream() .map(CompactionOperation::convertFromAvroRecordInstance).collect(Collectors.toList()); @@ -408,7 +408,7 @@ protected List> getRenamingActionsForUnschedu } }).collect(); } - log.warn("No operations for compaction instant : " + compactionInstant); + LOG.warn("No operations for compaction instant : " + compactionInstant); return new ArrayList<>(); } diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java index c35a0b4f74e94..5792f6b7beeb7 100644 --- a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java +++ b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java @@ -48,7 +48,7 @@ public class HoodieCleanClient extends AbstractHoodieClient { - private static Logger logger = LogManager.getLogger(HoodieCleanClient.class); + private static final Logger LOG = LogManager.getLogger(HoodieCleanClient.class); private final transient HoodieMetrics metrics; public HoodieCleanClient(JavaSparkContext jsc, HoodieWriteConfig clientConfig, HoodieMetrics metrics) { @@ -85,7 +85,7 @@ protected HoodieCleanMetadata clean(String startCleanTime) throws HoodieIOExcept // If there are inflight(failed) or previously requested clean operation, first perform them table.getCleanTimeline().filterInflightsAndRequested().getInstants().forEach(hoodieInstant -> { - logger.info("There were previously unfinished cleaner operations. Finishing Instant=" + hoodieInstant); + LOG.info("There were previously unfinished cleaner operations. 
Finishing Instant=" + hoodieInstant); runClean(table, hoodieInstant.getTimestamp()); }); @@ -122,9 +122,9 @@ protected Option scheduleClean(String startCleanTime) { // Save to both aux and timeline folder try { table.getActiveTimeline().saveToCleanRequested(cleanInstant, AvroUtils.serializeCleanerPlan(cleanerPlan)); - logger.info("Requesting Cleaning with instant time " + cleanInstant); + LOG.info("Requesting Cleaning with instant time " + cleanInstant); } catch (IOException e) { - logger.error("Got exception when saving cleaner requested file", e); + LOG.error("Got exception when saving cleaner requested file", e); throw new HoodieIOException(e.getMessage(), e); } return Option.of(cleanerPlan); @@ -147,7 +147,7 @@ protected HoodieCleanMetadata runClean(HoodieTable table, String cleanInstant cleanInstant.getState().equals(State.REQUESTED) || cleanInstant.getState().equals(State.INFLIGHT)); try { - logger.info("Cleaner started"); + LOG.info("Cleaner started"); final Timer.Context context = metrics.getCleanCtx(); if (!cleanInstant.isInflight()) { @@ -165,20 +165,20 @@ protected HoodieCleanMetadata runClean(HoodieTable table, String cleanInstant Option durationInMs = Option.empty(); if (context != null) { durationInMs = Option.of(metrics.getDurationInMs(context.stop())); - logger.info("cleanerElaspsedTime (Minutes): " + durationInMs.get() / (1000 * 60)); + LOG.info("cleanerElaspsedTime (Minutes): " + durationInMs.get() / (1000 * 60)); } HoodieTableMetaClient metaClient = createMetaClient(true); // Create the metadata and save it HoodieCleanMetadata metadata = CleanerUtils.convertCleanMetadata(metaClient, cleanInstant.getTimestamp(), durationInMs, cleanStats); - logger.info("Cleaned " + metadata.getTotalFilesDeleted() + " files. Earliest Retained :" + metadata.getEarliestCommitToRetain()); + LOG.info("Cleaned " + metadata.getTotalFilesDeleted() + " files. 
Earliest Retained :" + metadata.getEarliestCommitToRetain()); metrics.updateCleanMetrics(durationInMs.orElseGet(() -> -1L), metadata.getTotalFilesDeleted()); table.getActiveTimeline().transitionCleanInflightToComplete( new HoodieInstant(true, HoodieTimeline.CLEAN_ACTION, cleanInstant.getTimestamp()), AvroUtils.serializeCleanMetadata(metadata)); - logger.info("Marked clean started on " + cleanInstant.getTimestamp() + " as complete"); + LOG.info("Marked clean started on " + cleanInstant.getTimestamp() + " as complete"); return metadata; } catch (IOException e) { throw new HoodieIOException("Failed to clean up after commit", e); diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java index 4fdcc1f2aa554..652abe52b42e5 100644 --- a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java +++ b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java @@ -98,7 +98,7 @@ */ public class HoodieWriteClient extends AbstractHoodieClient { - private static Logger logger = LogManager.getLogger(HoodieWriteClient.class); + private static final Logger LOG = LogManager.getLogger(HoodieWriteClient.class); private static final String UPDATE_STR = "update"; private static final String LOOKUP_STR = "lookup"; private final boolean rollbackInFlight; @@ -397,13 +397,13 @@ private JavaRDD bulkInsertInternal(JavaRDD> deduped private void commitOnAutoCommit(String commitTime, JavaRDD resultRDD, String actionType) { if (config.shouldAutoCommit()) { - logger.info("Auto commit enabled: Committing " + commitTime); + LOG.info("Auto commit enabled: Committing " + commitTime); boolean commitResult = commit(commitTime, resultRDD, Option.empty(), actionType); if (!commitResult) { throw new HoodieCommitException("Failed to commit " + commitTime); } } else { - logger.info("Auto commit disabled for " + commitTime); + LOG.info("Auto commit disabled for " + commitTime); } } @@ -451,13 +451,13 @@ private JavaRDD upsertRecordsInternal(JavaRDD> prep if (preppedRecords.getStorageLevel() == StorageLevel.NONE()) { preppedRecords.persist(StorageLevel.MEMORY_AND_DISK_SER()); } else { - logger.info("RDD PreppedRecords was persisted at: " + preppedRecords.getStorageLevel()); + LOG.info("RDD PreppedRecords was persisted at: " + preppedRecords.getStorageLevel()); } WorkloadProfile profile = null; if (hoodieTable.isWorkloadProfileNeeded()) { profile = new WorkloadProfile(preppedRecords); - logger.info("Workload profile :" + profile); + LOG.info("Workload profile :" + profile); saveWorkloadProfileMetadataToInflight(profile, hoodieTable, commitTime); } @@ -523,7 +523,7 @@ public boolean commit(String commitTime, JavaRDD writeStatuses, private boolean commit(String commitTime, JavaRDD writeStatuses, Option> extraMetadata, String actionType) { - logger.info("Commiting " + commitTime); + LOG.info("Commiting " + commitTime); // Create a Hoodie table which encapsulated the commits and files visible HoodieTable table = HoodieTable.getHoodieTable(createMetaClient(true), config, jsc); @@ -559,10 +559,10 @@ private boolean commit(String commitTime, JavaRDD writeStatuses, archiveLog.archiveIfRequired(jsc); if (config.isAutoClean()) { // Call clean to cleanup if there is anything to cleanup after the commit, - logger.info("Auto cleaning is enabled. Running cleaner now"); + LOG.info("Auto cleaning is enabled. Running cleaner now"); clean(commitTime); } else { - logger.info("Auto cleaning is not enabled. 
Not running cleaner now"); + LOG.info("Auto cleaning is not enabled. Not running cleaner now"); } if (writeContext != null) { long durationInMs = metrics.getDurationInMs(writeContext.stop()); @@ -570,7 +570,7 @@ private boolean commit(String commitTime, JavaRDD writeStatuses, metadata, actionType); writeContext = null; } - logger.info("Committed " + commitTime); + LOG.info("Committed " + commitTime); } catch (IOException e) { throw new HoodieCommitException("Failed to complete commit " + config.getBasePath() + " at time " + commitTime, e); @@ -604,7 +604,7 @@ public boolean savepoint(String user, String comment) { } String latestCommit = table.getCompletedCommitsTimeline().lastInstant().get().getTimestamp(); - logger.info("Savepointing latest commit " + latestCommit); + LOG.info("Savepointing latest commit " + latestCommit); return savepoint(latestCommit, user, comment); } @@ -655,7 +655,7 @@ public boolean savepoint(String commitTime, String user, String comment) { config.shouldAssumeDatePartitioning())) .mapToPair((PairFunction>) partitionPath -> { // Scan all partitions files with this commit time - logger.info("Collecting latest files in partition path " + partitionPath); + LOG.info("Collecting latest files in partition path " + partitionPath); ReadOptimizedView view = table.getROFileSystemView(); List latestFiles = view.getLatestDataFilesBeforeOrOn(partitionPath, commitTime) .map(HoodieDataFile::getFileName).collect(Collectors.toList()); @@ -666,7 +666,7 @@ public boolean savepoint(String commitTime, String user, String comment) { // Nothing to save in the savepoint table.getActiveTimeline().saveAsComplete(new HoodieInstant(true, HoodieTimeline.SAVEPOINT_ACTION, commitTime), AvroUtils.serializeSavepointMetadata(metadata)); - logger.info("Savepoint " + commitTime + " created"); + LOG.info("Savepoint " + commitTime + " created"); return true; } catch (IOException e) { throw new HoodieSavepointException("Failed to savepoint " + commitTime, e); @@ -690,13 +690,13 @@ public void deleteSavepoint(String savepointTime) { HoodieInstant savePoint = new HoodieInstant(false, HoodieTimeline.SAVEPOINT_ACTION, savepointTime); boolean isSavepointPresent = table.getCompletedSavepointTimeline().containsInstant(savePoint); if (!isSavepointPresent) { - logger.warn("No savepoint present " + savepointTime); + LOG.warn("No savepoint present " + savepointTime); return; } activeTimeline.revertToInflight(savePoint); activeTimeline.deleteInflight(new HoodieInstant(true, HoodieTimeline.SAVEPOINT_ACTION, savepointTime)); - logger.info("Savepoint " + savepointTime + " deleted"); + LOG.info("Savepoint " + savepointTime + " deleted"); } /** @@ -724,7 +724,7 @@ private void deleteRequestedCompaction(String compactionTime) { } else { throw new IllegalArgumentException("Compaction is not in requested state " + compactionTime); } - logger.info("Compaction " + compactionTime + " deleted"); + LOG.info("Compaction " + compactionTime + " deleted"); } /** @@ -752,7 +752,7 @@ public boolean rollbackToSavepoint(String savepointTime) { List commitsToRollback = commitTimeline.findInstantsAfter(savepointTime, Integer.MAX_VALUE).getInstants() .map(HoodieInstant::getTimestamp).collect(Collectors.toList()); - logger.info("Rolling back commits " + commitsToRollback); + LOG.info("Rolling back commits " + commitsToRollback); restoreToInstant(savepointTime); @@ -810,7 +810,7 @@ public void restoreToInstant(final String instantTime) throws HoodieRollbackExce // delete these files when it does not see a corresponding instant file under 
.hoodie List statsForCompaction = doRollbackAndGetStats(instant.getTimestamp()); instantsToStats.put(instant.getTimestamp(), statsForCompaction); - logger.info("Deleted compaction instant " + instant); + LOG.info("Deleted compaction instant " + instant); break; default: throw new IllegalArgumentException("invalid action name " + instant.getAction()); @@ -852,7 +852,7 @@ private List doRollbackAndGetStats(final String commitToRoll if (commitTimeline.empty() && inflightCommitTimeline.empty()) { // nothing to rollback - logger.info("No commits to rollback " + commitToRollback); + LOG.info("No commits to rollback " + commitToRollback); } // Make sure only the last n commits are being rolled back @@ -874,13 +874,13 @@ private List doRollbackAndGetStats(final String commitToRoll List stats = table.rollback(jsc, commitToRollback, true); - logger.info("Deleted inflight commits " + commitToRollback); + LOG.info("Deleted inflight commits " + commitToRollback); // cleanup index entries if (!index.rollbackCommit(commitToRollback)) { throw new HoodieRollbackException("Rollback index changes failed, for time :" + commitToRollback); } - logger.info("Index rolled back for commits " + commitToRollback); + LOG.info("Index rolled back for commits " + commitToRollback); return stats; } @@ -897,10 +897,10 @@ private void finishRollback(final Timer.Context context, List { @@ -1031,7 +1031,7 @@ private void startCommit(String instantTime) { */ public Option scheduleCompaction(Option> extraMetadata) throws IOException { String instantTime = HoodieActiveTimeline.createNewCommitTime(); - logger.info("Generate a new instant time " + instantTime); + LOG.info("Generate a new instant time " + instantTime); boolean notEmpty = scheduleCompactionAtInstant(instantTime, extraMetadata); return notEmpty ? Option.of(instantTime) : Option.empty(); } @@ -1276,9 +1276,9 @@ protected void commitCompaction(JavaRDD compactedStatuses, HoodieTa + config.getBasePath() + " at time " + compactionCommitTime, e); } } - logger.info("Compacted successfully on commit " + compactionCommitTime); + LOG.info("Compacted successfully on commit " + compactionCommitTime); } else { - logger.info("Compaction did not run for commit " + compactionCommitTime); + LOG.info("Compaction did not run for commit " + compactionCommitTime); } } @@ -1289,7 +1289,7 @@ private void finalizeWrite(HoodieTable table, String instantTime, List durationInMs = Option.of(metrics.getDurationInMs(finalizeCtx.stop())); durationInMs.ifPresent(duration -> { - logger.info("Finalize write elapsed time (milliseconds): " + duration); + LOG.info("Finalize write elapsed time (milliseconds): " + duration); metrics.updateFinalizeWriteMetrics(duration, stats.size()); }); } @@ -1331,7 +1331,7 @@ private HoodieCommitMetadata doCompactionCommit(HoodieTable table, JavaRDD f // of buckets and assigns buckets in the same order as file groups. If we were to simply round robin, then buckets // for a file group is more or less guaranteed to be placed on different partitions all the time. 
int minBucketsPerPartition = Math.max((int) Math.floor((1.0 * totalBuckets) / partitions), 1); - logger.info(String.format("TotalBuckets %d, min_buckets/partition %d", totalBuckets, minBucketsPerPartition)); + LOG.info(String.format("TotalBuckets %d, min_buckets/partition %d", totalBuckets, minBucketsPerPartition)); int[] bucketsFilled = new int[partitions]; Map bucketsFilledPerFileGroup = new HashMap<>(); int partitionIndex = 0; @@ -127,13 +127,13 @@ public BucketizedBloomCheckPartitioner(int targetPartitions, Map f } } - if (logger.isDebugEnabled()) { - logger.debug("Partitions assigned per file groups :" + fileGroupToPartitions); + if (LOG.isDebugEnabled()) { + LOG.debug("Partitions assigned per file groups :" + fileGroupToPartitions); StringBuilder str = new StringBuilder(); for (int i = 0; i < bucketsFilled.length; i++) { str.append("p" + i + " : " + bucketsFilled[i] + ","); } - logger.debug("Num buckets assigned per file group :" + str); + LOG.debug("Num buckets assigned per file group :" + str); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java index 0ffdf1111361d..227788956c43c 100644 --- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java +++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java @@ -63,7 +63,7 @@ public class HoodieBloomIndex extends HoodieIndex private static final int SPARK_MAXIMUM_BYTES_PER_PARTITION = 1500 * 1024 * 1024; // this is how much a triplet of (partitionPath, fileId, recordKey) costs. private static final int BYTES_PER_PARTITION_FILE_KEY_TRIPLET = 300; - private static Logger logger = LogManager.getLogger(HoodieBloomIndex.class); + private static final Logger LOG = LogManager.getLogger(HoodieBloomIndex.class); private static int MAX_ITEMS_PER_SHUFFLE_PARTITION = SPARK_MAXIMUM_BYTES_PER_PARTITION / BYTES_PER_PARTITION_FILE_KEY_TRIPLET; @@ -92,9 +92,9 @@ public JavaRDD> tagLocation(JavaRDD> recordRDD, if (config.getBloomIndexUseCaching()) { keyFilenamePairRDD.persist(StorageLevel.MEMORY_AND_DISK_SER()); } - if (logger.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { long totalTaggedRecords = keyFilenamePairRDD.count(); - logger.debug("Number of update records (ones tagged with a fileID): " + totalTaggedRecords); + LOG.debug("Number of update records (ones tagged with a fileID): " + totalTaggedRecords); } // Step 4: Tag the incoming records, as inserts or updates, by joining with existing record keys @@ -206,7 +206,7 @@ int computeSafeParallelism(Map recordsPerPartition, Map> loadInvolvedFiles(List partitio String[] minMaxKeys = rangeInfoHandle.getMinMaxKeys(); return new Tuple2<>(pf.getKey(), new BloomIndexFileInfo(pf.getValue(), minMaxKeys[0], minMaxKeys[1])); } catch (MetadataNotFoundException me) { - logger.warn("Unable to find range metadata in file :" + pf); + LOG.warn("Unable to find range metadata in file :" + pf); return new Tuple2<>(pf.getKey(), new BloomIndexFileInfo(pf.getValue())); } }).collect(); diff --git a/hudi-client/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java b/hudi-client/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java index 8fdf25c0f2baa..e3a49041ee45c 100644 --- a/hudi-client/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java +++ b/hudi-client/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java @@ -25,7 +25,7 @@ public class 
DefaultHBaseQPSResourceAllocator implements HBaseIndexQPSResourceAllocator { private HoodieWriteConfig hoodieWriteConfig; - private static Logger logger = LogManager.getLogger(DefaultHBaseQPSResourceAllocator.class); + private static final Logger LOG = LogManager.getLogger(DefaultHBaseQPSResourceAllocator.class); public DefaultHBaseQPSResourceAllocator(HoodieWriteConfig hoodieWriteConfig) { this.hoodieWriteConfig = hoodieWriteConfig; @@ -46,7 +46,7 @@ public float acquireQPSResources(final float desiredQPSFraction, final long numP @Override public void releaseQPSResources() { // Do nothing, as there are no resources locked in default implementation - logger.info(String.format("Release QPS resources called for %s with default implementation, do nothing", + LOG.info(String.format("Release QPS resources called for %s with default implementation, do nothing", this.hoodieWriteConfig.getHbaseTableName())); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java index 87d555795793f..dfe9dab96d3d4 100644 --- a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java +++ b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java @@ -82,7 +82,7 @@ public class HBaseIndex extends HoodieIndex { private static final byte[] PARTITION_PATH_COLUMN = Bytes.toBytes("partition_path"); private static final int SLEEP_TIME_MILLISECONDS = 100; - private static Logger logger = LogManager.getLogger(HBaseIndex.class); + private static final Logger LOG = LogManager.getLogger(HBaseIndex.class); private static Connection hbaseConnection = null; private HBaseIndexQPSResourceAllocator hBaseIndexQPSResourceAllocator = null; private float qpsFraction; @@ -114,12 +114,12 @@ private void init(HoodieWriteConfig config) { @VisibleForTesting public HBaseIndexQPSResourceAllocator createQPSResourceAllocator(HoodieWriteConfig config) { try { - logger.info("createQPSResourceAllocator :" + config.getHBaseQPSResourceAllocatorClass()); + LOG.info("createQPSResourceAllocator :" + config.getHBaseQPSResourceAllocatorClass()); final HBaseIndexQPSResourceAllocator resourceAllocator = (HBaseIndexQPSResourceAllocator) ReflectionUtils .loadClass(config.getHBaseQPSResourceAllocatorClass(), config); return resourceAllocator; } catch (Exception e) { - logger.warn("error while instantiating HBaseIndexQPSResourceAllocator", e); + LOG.warn("error while instantiating HBaseIndexQPSResourceAllocator", e); } return new DefaultHBaseQPSResourceAllocator(config); } @@ -321,7 +321,7 @@ private Function2, Iterator> updateL doPutsAndDeletes(hTable, puts, deletes); } catch (Exception e) { Exception we = new Exception("Error updating index for " + writeStatus, e); - logger.error(we); + LOG.error(we); writeStatus.setGlobalError(we); } writeStatusList.add(writeStatus); @@ -361,7 +361,7 @@ private static void sleepForTime(int sleepTimeMs) { try { Thread.sleep(sleepTimeMs); } catch (InterruptedException e) { - logger.error("Sleep interrupted during throttling", e); + LOG.error("Sleep interrupted during throttling", e); throw new RuntimeException(e); } } @@ -371,7 +371,7 @@ public JavaRDD updateLocation(JavaRDD writeStatusRDD, HoodieTable hoodieTable) { final HBaseIndexQPSResourceAllocator hBaseIndexQPSResourceAllocator = createQPSResourceAllocator(this.config); setPutBatchSize(writeStatusRDD, hBaseIndexQPSResourceAllocator, jsc); - logger.info("multiPutBatchSize: before hbase puts" + multiPutBatchSize); + LOG.info("multiPutBatchSize: 
before hbase puts" + multiPutBatchSize); JavaRDD writeStatusJavaRDD = writeStatusRDD.mapPartitionsWithIndex(updateLocationFunction(), true); // caching the index updated status RDD writeStatusJavaRDD = writeStatusJavaRDD.persist(config.getWriteStatusStorageLevel()); @@ -399,15 +399,15 @@ private void setPutBatchSize(JavaRDD writeStatusRDD, this.numRegionServersForTable = getNumRegionServersAliveForTable(); final float desiredQPSFraction = hBaseIndexQPSResourceAllocator.calculateQPSFractionForPutsTime(numPuts, this.numRegionServersForTable); - logger.info("Desired QPSFraction :" + desiredQPSFraction); - logger.info("Number HBase puts :" + numPuts); - logger.info("Hbase Puts Parallelism :" + hbasePutsParallelism); + LOG.info("Desired QPSFraction :" + desiredQPSFraction); + LOG.info("Number HBase puts :" + numPuts); + LOG.info("Hbase Puts Parallelism :" + hbasePutsParallelism); final float availableQpsFraction = hBaseIndexQPSResourceAllocator.acquireQPSResources(desiredQPSFraction, numPuts); - logger.info("Allocated QPS Fraction :" + availableQpsFraction); + LOG.info("Allocated QPS Fraction :" + availableQpsFraction); multiPutBatchSize = putBatchSizeCalculator.getBatchSize(numRegionServersForTable, maxQpsPerRegionServer, hbasePutsParallelism, maxExecutors, SLEEP_TIME_MILLISECONDS, availableQpsFraction); - logger.info("multiPutBatchSize :" + multiPutBatchSize); + LOG.info("multiPutBatchSize :" + multiPutBatchSize); } } @@ -421,7 +421,7 @@ public Tuple2 getHBasePutAccessParallelism(final JavaRDD e.getServerName()).distinct().count()); return numRegionServersForTable; } catch (IOException e) { - logger.error(e); + LOG.error(e); throw new RuntimeException(e); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java index 712702ac5240f..a4400d03eb353 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java @@ -64,7 +64,7 @@ */ public class HoodieAppendHandle extends HoodieWriteHandle { - private static Logger logger = LogManager.getLogger(HoodieAppendHandle.class); + private static final Logger LOG = LogManager.getLogger(HoodieAppendHandle.class); // This acts as the sequenceID for records written private static AtomicLong recordIndex = new AtomicLong(1); private final String fileId; @@ -123,7 +123,7 @@ private void init(HoodieRecord record) { } else { // This means there is no base data file, start appending to a new log file fileSlice = Option.of(new FileSlice(partitionPath, baseInstantTime, this.fileId)); - logger.info("New InsertHandle for partition :" + partitionPath); + LOG.info("New InsertHandle for partition :" + partitionPath); } writeStatus.getStat().setPrevCommit(baseInstantTime); writeStatus.setFileId(fileId); @@ -137,7 +137,7 @@ private void init(HoodieRecord record) { ((HoodieDeltaWriteStat) writeStatus.getStat()).setLogVersion(currentLogFile.getLogVersion()); ((HoodieDeltaWriteStat) writeStatus.getStat()).setLogOffset(writer.getCurrentSize()); } catch (Exception e) { - logger.error("Error in update task at commit " + instantTime, e); + LOG.error("Error in update task at commit " + instantTime, e); writeStatus.setGlobalError(e); throw new HoodieUpsertException("Failed to initialize HoodieAppendHandle for FileId: " + fileId + " on commit " + instantTime + " on HDFS path " + hoodieTable.getMetaClient().getBasePath() + partitionPath, e); @@ -179,7 +179,7 @@ private Option 
getIndexedRecord(HoodieRecord hoodieRecord) { hoodieRecord.deflate(); return avroRecord; } catch (Exception e) { - logger.error("Error writing record " + hoodieRecord, e); + LOG.error("Error writing record " + hoodieRecord, e); writeStatus.markFailure(hoodieRecord, e, recordMetadata); } return Option.empty(); @@ -232,7 +232,7 @@ public void write(HoodieRecord record, Option insertValue) { // Not throwing exception from here, since we don't want to fail the entire job // for a single record writeStatus.markFailure(record, t, recordMetadata); - logger.error("Error writing record " + record, t); + LOG.error("Error writing record " + record, t); } } @@ -259,7 +259,7 @@ public WriteStatus close() { runtimeStats.setTotalUpsertTime(timer.endTimer()); stat.setRuntimeStats(runtimeStats); - logger.info(String.format("AppendHandle for partitionPath %s fileID %s, took %d ms.", stat.getPartitionPath(), + LOG.info(String.format("AppendHandle for partitionPath %s fileID %s, took %d ms.", stat.getPartitionPath(), stat.getFileId(), runtimeStats.getTotalUpsertTime())); return writeStatus; @@ -308,7 +308,7 @@ private void flushToDiskIfRequired(HoodieRecord record) { if (numberOfRecords >= (int) (maxBlockSize / averageRecordSize)) { // Recompute averageRecordSize before writing a new block and update existing value with // avg of new and old - logger.info("AvgRecordSize => " + averageRecordSize); + LOG.info("AvgRecordSize => " + averageRecordSize); averageRecordSize = (averageRecordSize + SizeEstimator.estimate(record)) / 2; doAppend(header); estimatedNumberOfBytesWritten += averageRecordSize * numberOfRecords; diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java index eaaf72f47e36d..5f5aa15fef852 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCleanHelper.java @@ -61,7 +61,7 @@ */ public class HoodieCleanHelper> implements Serializable { - private static Logger logger = LogManager.getLogger(HoodieCleanHelper.class); + private static final Logger LOG = LogManager.getLogger(HoodieCleanHelper.class); private final SyncableFileSystemView fileSystemView; private final HoodieTimeline commitTimeline; @@ -99,7 +99,7 @@ public List getPartitionPathsToClean(Option newInstantToR .deserializeHoodieCleanMetadata(hoodieTable.getActiveTimeline().getInstantDetails(lastClean.get()).get()); if ((cleanMetadata.getEarliestCommitToRetain() != null) && (cleanMetadata.getEarliestCommitToRetain().length() > 0)) { - logger.warn("Incremental Cleaning mode is enabled. Looking up partition-paths that have since changed " + LOG.warn("Incremental Cleaning mode is enabled. Looking up partition-paths that have since changed " + "since last cleaned at " + cleanMetadata.getEarliestCommitToRetain() + ". New Instant to retain : " + newInstantToRetain); return hoodieTable.getCompletedCommitsTimeline().getInstants().filter(instant -> { @@ -129,7 +129,7 @@ public List getPartitionPathsToClean(Option newInstantToR * single file (i.e run it with versionsRetained = 1) */ private List getFilesToCleanKeepingLatestVersions(String partitionPath) throws IOException { - logger.info("Cleaning " + partitionPath + ", retaining latest " + config.getCleanerFileVersionsRetained() + LOG.info("Cleaning " + partitionPath + ", retaining latest " + config.getCleanerFileVersionsRetained() + " file versions. 
"); List fileGroups = fileSystemView.getAllFileGroups(partitionPath).collect(Collectors.toList()); List deletePaths = new ArrayList<>(); @@ -189,7 +189,7 @@ private List getFilesToCleanKeepingLatestVersions(String partitionPath) */ private List getFilesToCleanKeepingLatestCommits(String partitionPath) throws IOException { int commitsRetained = config.getCleanerCommitsRetained(); - logger.info("Cleaning " + partitionPath + ", retaining latest " + commitsRetained + " commits. "); + LOG.info("Cleaning " + partitionPath + ", retaining latest " + commitsRetained + " commits. "); List deletePaths = new ArrayList<>(); // Collect all the datafiles savepointed by all the savepoints @@ -276,7 +276,7 @@ public List getDeletePaths(String partitionPath) throws IOException { } else { throw new IllegalArgumentException("Unknown cleaning policy : " + policy.name()); } - logger.info(deletePaths.size() + " patterns used to delete in partition path:" + partitionPath); + LOG.info(deletePaths.size() + " patterns used to delete in partition path:" + partitionPath); return deletePaths; } diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java index e7e0cfe96633e..61fd318cc6f74 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCommitArchiveLog.java @@ -68,7 +68,7 @@ */ public class HoodieCommitArchiveLog { - private static Logger log = LogManager.getLogger(HoodieCommitArchiveLog.class); + private static final Logger LOG = LogManager.getLogger(HoodieCommitArchiveLog.class); private final Path archiveFilePath; private final HoodieTableMetaClient metaClient; @@ -114,11 +114,11 @@ public boolean archiveIfRequired(final JavaSparkContext jsc) throws IOException boolean success = true; if (instantsToArchive.iterator().hasNext()) { this.writer = openWriter(); - log.info("Archiving instants " + instantsToArchive); + LOG.info("Archiving instants " + instantsToArchive); archive(instantsToArchive); success = deleteArchivedInstants(instantsToArchive); } else { - log.info("No Instants to archive"); + LOG.info("No Instants to archive"); } return success; } finally { @@ -175,14 +175,14 @@ private Stream getInstantsToArchive(JavaSparkContext jsc) { } private boolean deleteArchivedInstants(List archivedInstants) throws IOException { - log.info("Deleting instants " + archivedInstants); + LOG.info("Deleting instants " + archivedInstants); boolean success = true; for (HoodieInstant archivedInstant : archivedInstants) { Path commitFile = new Path(metaClient.getMetaPath(), archivedInstant.getFileName()); try { if (metaClient.getFs().exists(commitFile)) { success &= metaClient.getFs().delete(commitFile, false); - log.info("Archived and deleted instant file " + commitFile); + LOG.info("Archived and deleted instant file " + commitFile); } } catch (IOException e) { throw new HoodieIOException("Failed to delete archived instant " + archivedInstant, e); @@ -217,11 +217,11 @@ private boolean deleteAllInstantsOlderorEqualsInAuxMetaFolder(HoodieInstant thre boolean success = true; for (HoodieInstant deleteInstant : instantsToBeDeleted) { - log.info("Deleting instant " + deleteInstant + " in auxiliary meta path " + metaClient.getMetaAuxiliaryPath()); + LOG.info("Deleting instant " + deleteInstant + " in auxiliary meta path " + metaClient.getMetaAuxiliaryPath()); Path metaFile = new Path(metaClient.getMetaAuxiliaryPath(), deleteInstant.getFileName()); 
if (metaClient.getFs().exists(metaFile)) { success &= metaClient.getFs().delete(metaFile, false); - log.info("Deleted instant file in auxiliary metapath : " + metaFile); + LOG.info("Deleted instant file in auxiliary metapath : " + metaFile); } } return success; @@ -231,7 +231,7 @@ public void archive(List instants) throws HoodieCommitException { try { HoodieTimeline commitTimeline = metaClient.getActiveTimeline().getAllCommitsTimeline().filterCompletedInstants(); Schema wrapperSchema = HoodieArchivedMetaEntry.getClassSchema(); - log.info("Wrapper schema " + wrapperSchema.toString()); + LOG.info("Wrapper schema " + wrapperSchema.toString()); List records = new ArrayList<>(); for (HoodieInstant hoodieInstant : instants) { try { @@ -240,7 +240,7 @@ public void archive(List instants) throws HoodieCommitException { writeToFile(wrapperSchema, records); } } catch (Exception e) { - log.error("Failed to archive commits, .commit file: " + hoodieInstant.getFileName(), e); + LOG.error("Failed to archive commits, .commit file: " + hoodieInstant.getFileName(), e); if (this.config.isFailOnTimelineArchivingEnabled()) { throw e; } diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java index c4769084bc4c9..6916bad788bb4 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java @@ -45,7 +45,7 @@ public class HoodieCreateHandle extends HoodieWriteHandle { - private static Logger logger = LogManager.getLogger(HoodieCreateHandle.class); + private static final Logger LOG = LogManager.getLogger(HoodieCreateHandle.class); private final HoodieStorageWriter storageWriter; private final Path path; @@ -73,7 +73,7 @@ public HoodieCreateHandle(HoodieWriteConfig config, String commitTime, HoodieTab } catch (IOException e) { throw new HoodieInsertException("Failed to initialize HoodieStorageWriter for path " + path, e); } - logger.info("New CreateHandle for partition :" + partitionPath + " with fileId " + fileId); + LOG.info("New CreateHandle for partition :" + partitionPath + " with fileId " + fileId); } /** @@ -119,7 +119,7 @@ public void write(HoodieRecord record, Option avroRecord) { // Not throwing exception from here, since we don't want to fail the entire job // for a single record writeStatus.markFailure(record, t, recordMetadata); - logger.error("Error writing record " + record, t); + LOG.error("Error writing record " + record, t); } } @@ -151,7 +151,7 @@ public WriteStatus getWriteStatus() { */ @Override public WriteStatus close() { - logger + LOG .info("Closing the file " + writeStatus.getFileId() + " as we are done with all the records " + recordsWritten); try { @@ -174,7 +174,7 @@ public WriteStatus close() { stat.setRuntimeStats(runtimeStats); writeStatus.setStat(stat); - logger.info(String.format("CreateHandle for partitionPath %s fileID %s, took %d ms.", stat.getPartitionPath(), + LOG.info(String.format("CreateHandle for partitionPath %s fileID %s, took %d ms.", stat.getPartitionPath(), stat.getFileId(), runtimeStats.getTotalCreateTime())); return writeStatus; diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java index d2cde542ecc6e..ca1dc5af20c88 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieKeyLookupHandle.java @@ 
-44,7 +44,7 @@ */ public class HoodieKeyLookupHandle extends HoodieReadHandle { - private static Logger logger = LogManager.getLogger(HoodieKeyLookupHandle.class); + private static final Logger LOG = LogManager.getLogger(HoodieKeyLookupHandle.class); private final HoodieTableType tableType; @@ -63,7 +63,7 @@ public HoodieKeyLookupHandle(HoodieWriteConfig config, HoodieTable hoodieTabl HoodieTimer timer = new HoodieTimer().startTimer(); this.bloomFilter = ParquetUtils.readBloomFilterFromParquetMetadata(hoodieTable.getHadoopConf(), new Path(getLatestDataFile().getPath())); - logger.info(String.format("Read bloom filter from %s in %d ms", partitionPathFilePair, timer.endTimer())); + LOG.info(String.format("Read bloom filter from %s in %d ms", partitionPathFilePair, timer.endTimer())); } /** @@ -79,10 +79,10 @@ public static List checkCandidatesAgainstFile(Configuration configuratio Set fileRowKeys = ParquetUtils.filterParquetRowKeys(configuration, filePath, new HashSet<>(candidateRecordKeys)); foundRecordKeys.addAll(fileRowKeys); - logger.info(String.format("Checked keys against file %s, in %d ms. #candidates (%d) #found (%d)", filePath, + LOG.info(String.format("Checked keys against file %s, in %d ms. #candidates (%d) #found (%d)", filePath, timer.endTimer(), candidateRecordKeys.size(), foundRecordKeys.size())); - if (logger.isDebugEnabled()) { - logger.debug("Keys matching for file " + filePath + " => " + foundRecordKeys); + if (LOG.isDebugEnabled()) { + LOG.debug("Keys matching for file " + filePath + " => " + foundRecordKeys); } } } catch (Exception e) { @@ -97,8 +97,8 @@ public static List checkCandidatesAgainstFile(Configuration configuratio public void addKey(String recordKey) { // check record key against bloom filter of current file & add to possible keys if needed if (bloomFilter.mightContain(recordKey)) { - if (logger.isDebugEnabled()) { - logger.debug("Record key " + recordKey + " matches bloom filter in " + partitionPathFilePair); + if (LOG.isDebugEnabled()) { + LOG.debug("Record key " + recordKey + " matches bloom filter in " + partitionPathFilePair); } candidateRecordKeys.add(recordKey); } @@ -109,14 +109,14 @@ public void addKey(String recordKey) { * Of all the keys, that were added, return a list of keys that were actually found in the file group. 
*/ public KeyLookupResult getLookupResult() { - if (logger.isDebugEnabled()) { - logger.debug("#The candidate row keys for " + partitionPathFilePair + " => " + candidateRecordKeys); + if (LOG.isDebugEnabled()) { + LOG.debug("#The candidate row keys for " + partitionPathFilePair + " => " + candidateRecordKeys); } HoodieDataFile dataFile = getLatestDataFile(); List matchingKeys = checkCandidatesAgainstFile(hoodieTable.getHadoopConf(), candidateRecordKeys, new Path(dataFile.getPath())); - logger.info( + LOG.info( String.format("Total records (%d), bloom filter candidates (%d)/fp(%d), actual matches (%d)", totalKeysChecked, candidateRecordKeys.size(), candidateRecordKeys.size() - matchingKeys.size(), matchingKeys.size())); return new KeyLookupResult(partitionPathFilePair.getRight(), partitionPathFilePair.getLeft(), diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java index 075be1ae73f3b..45519acca806f 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java @@ -56,7 +56,7 @@ @SuppressWarnings("Duplicates") public class HoodieMergeHandle extends HoodieWriteHandle { - private static Logger logger = LogManager.getLogger(HoodieMergeHandle.class); + private static final Logger LOG = LogManager.getLogger(HoodieMergeHandle.class); private Map> keyToNewRecords; private Set writtenRecordKeys; @@ -132,7 +132,7 @@ public void write(HoodieRecord record, Option avroRecord, Option< if (exception.isPresent() && exception.get() instanceof Throwable) { // Not throwing exception from here, since we don't want to fail the entire job for a single record writeStatus.markFailure(record, exception.get(), recordMetadata); - logger.error("Error writing record " + record, exception.get()); + LOG.error("Error writing record " + record, exception.get()); } else { write(record, avroRecord); } @@ -149,7 +149,7 @@ protected GenericRecord rewriteRecord(GenericRecord record) { * Extract old file path, initialize StorageWriter and WriteStatus. 
*/ private void init(String fileId, String partitionPath, HoodieDataFile dataFileToBeMerged) { - logger.info("partitionPath:" + partitionPath + ", fileId to be merged:" + fileId); + LOG.info("partitionPath:" + partitionPath + ", fileId to be merged:" + fileId); this.writtenRecordKeys = new HashSet<>(); writeStatus.setStat(new HoodieWriteStat()); try { @@ -165,7 +165,7 @@ private void init(String fileId, String partitionPath, HoodieDataFile dataFileTo + FSUtils.makeDataFileName(instantTime, writeToken, fileId)).toString(); newFilePath = new Path(config.getBasePath(), relativePath); - logger.info(String.format("Merging new data into oldPath %s, as newPath %s", oldFilePath.toString(), + LOG.info(String.format("Merging new data into oldPath %s, as newPath %s", oldFilePath.toString(), newFilePath.toString())); // file name is same for all records, in this bunch writeStatus.setFileId(fileId); @@ -181,7 +181,7 @@ private void init(String fileId, String partitionPath, HoodieDataFile dataFileTo storageWriter = HoodieStorageWriterFactory.getStorageWriter(instantTime, newFilePath, hoodieTable, config, writerSchema); } catch (IOException io) { - logger.error("Error in update task at commit " + instantTime, io); + LOG.error("Error in update task at commit " + instantTime, io); writeStatus.setGlobalError(io); throw new HoodieUpsertException("Failed to initialize HoodieUpdateHandle for FileId: " + fileId + " on commit " + instantTime + " on path " + hoodieTable.getMetaClient().getBasePath(), io); @@ -195,7 +195,7 @@ private String init(String fileId, Iterator> newRecordsItr) { try { // Load the new records in a map long memoryForMerge = config.getMaxMemoryPerPartitionMerge(); - logger.info("MaxMemoryPerPartitionMerge => " + memoryForMerge); + LOG.info("MaxMemoryPerPartitionMerge => " + memoryForMerge); this.keyToNewRecords = new ExternalSpillableMap<>(memoryForMerge, config.getSpillableMapBasePath(), new DefaultSizeEstimator(), new HoodieRecordSizeEstimator(originalSchema)); } catch (IOException io) { @@ -212,7 +212,7 @@ private String init(String fileId, Iterator> newRecordsItr) { // NOTE: Once Records are added to map (spillable-map), DO NOT change it as they won't persist keyToNewRecords.put(record.getRecordKey(), record); } - logger.info("Number of entries in MemoryBasedMap => " + LOG.info("Number of entries in MemoryBasedMap => " + ((ExternalSpillableMap) keyToNewRecords).getInMemoryMapNumEntries() + "Total size in bytes of MemoryBasedMap => " + ((ExternalSpillableMap) keyToNewRecords).getCurrentInMemoryMapSize() + "Number of entries in DiskBasedMap => " @@ -247,7 +247,7 @@ private boolean writeRecord(HoodieRecord hoodieRecord, Option hoodieRecord.deflate(); return true; } catch (Exception e) { - logger.error("Error writing record " + hoodieRecord, e); + LOG.error("Error writing record " + hoodieRecord, e); writeStatus.markFailure(hoodieRecord, e, recordMetadata); } return false; @@ -289,11 +289,11 @@ public void write(GenericRecord oldRecord) { try { storageWriter.writeAvro(key, oldRecord); } catch (ClassCastException e) { - logger.error("Schema mismatch when rewriting old record " + oldRecord + " from file " + getOldFilePath() + LOG.error("Schema mismatch when rewriting old record " + oldRecord + " from file " + getOldFilePath() + " to file " + newFilePath + " with writerSchema " + writerSchema.toString(true)); throw new HoodieUpsertException(errMsg, e); } catch (IOException e) { - logger.error("Failed to merge old record into new file for key " + key + " from old file " + getOldFilePath() + 
LOG.error("Failed to merge old record into new file for key " + key + " from old file " + getOldFilePath() + " to new file " + newFilePath, e); throw new HoodieUpsertException(errMsg, e); } @@ -337,7 +337,7 @@ public WriteStatus close() { runtimeStats.setTotalUpsertTime(timer.endTimer()); stat.setRuntimeStats(runtimeStats); - logger.info(String.format("MergeHandle for partitionPath %s fileID %s, took %d ms.", stat.getPartitionPath(), + LOG.info(String.format("MergeHandle for partitionPath %s fileID %s, took %d ms.", stat.getPartitionPath(), stat.getFileId(), runtimeStats.getTotalUpsertTime())); return writeStatus; diff --git a/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java b/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java index c7e6f9a2cafef..7a1939a4747dc 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java @@ -47,7 +47,7 @@ */ public abstract class HoodieWriteHandle extends HoodieIOHandle { - private static Logger logger = LogManager.getLogger(HoodieWriteHandle.class); + private static final Logger LOG = LogManager.getLogger(HoodieWriteHandle.class); protected final Schema originalSchema; protected final Schema writerSchema; protected HoodieTimer timer; @@ -97,7 +97,7 @@ public Path makeNewPath(String partitionPath) { protected void createMarkerFile(String partitionPath) { Path markerPath = makeNewMarkerPath(partitionPath); try { - logger.info("Creating Marker Path=" + markerPath); + LOG.info("Creating Marker Path=" + markerPath); fs.create(markerPath, false).close(); } catch (IOException e) { throw new HoodieException("Failed to create marker file " + markerPath, e); @@ -147,7 +147,7 @@ public void write(HoodieRecord record, Option avroRecord, Option< if (exception.isPresent() && exception.get() instanceof Throwable) { // Not throwing exception from here, since we don't want to fail the entire job for a single record writeStatus.markFailure(record, exception.get(), recordMetadata); - logger.error("Error writing record " + record, exception.get()); + LOG.error("Error writing record " + record, exception.get()); } else { write(record, avroRecord); } diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieRealtimeTableCompactor.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieRealtimeTableCompactor.java index 2f8f8d0925673..8c4b009a45c4f 100644 --- a/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieRealtimeTableCompactor.java +++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/HoodieRealtimeTableCompactor.java @@ -74,7 +74,7 @@ */ public class HoodieRealtimeTableCompactor implements HoodieCompactor { - private static Logger log = LogManager.getLogger(HoodieRealtimeTableCompactor.class); + private static final Logger LOG = LogManager.getLogger(HoodieRealtimeTableCompactor.class); // Accumulator to keep track of total log files for a dataset private AccumulatorV2 totalLogFiles; // Accumulator to keep track of total log file slices for a dataset @@ -92,7 +92,7 @@ public JavaRDD compact(JavaSparkContext jsc, HoodieCompactionPlan c HoodieCopyOnWriteTable table = new HoodieCopyOnWriteTable(config, jsc); List operations = compactionPlan.getOperations().stream() .map(CompactionOperation::convertFromAvroRecordInstance).collect(toList()); - log.info("Compactor compacting " + operations + " files"); + LOG.info("Compactor compacting " + operations + " files"); return jsc.parallelize(operations, 
operations.size()) .map(s -> compact(table, metaClient, config, s, compactionInstantTime)).flatMap(List::iterator); @@ -103,7 +103,7 @@ private List compact(HoodieCopyOnWriteTable hoodieCopyOnWriteTable, FileSystem fs = metaClient.getFs(); Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema())); - log.info("Compacting base " + operation.getDataFileName() + " with delta files " + operation.getDeltaFileNames() + LOG.info("Compacting base " + operation.getDataFileName() + " with delta files " + operation.getDeltaFileNames() + " for commit " + commitTime); // TODO - FIX THIS // Reads the entire avro file. Always only specific blocks should be read from the avro file @@ -115,7 +115,7 @@ private List compact(HoodieCopyOnWriteTable hoodieCopyOnWriteTable, .getActiveTimeline().getTimelineOfActions(Sets.newHashSet(HoodieTimeline.COMMIT_ACTION, HoodieTimeline.ROLLBACK_ACTION, HoodieTimeline.DELTA_COMMIT_ACTION)) .filterCompletedInstants().lastInstant().get().getTimestamp(); - log.info("MaxMemoryPerCompaction => " + config.getMaxMemoryPerCompaction()); + LOG.info("MaxMemoryPerCompaction => " + config.getMaxMemoryPerCompaction()); List logFiles = operation.getDeltaFileNames().stream().map( p -> new Path(FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()), p).toString()) @@ -176,7 +176,7 @@ public HoodieCompactionPlan generateCompactionPlan(JavaSparkContext jsc, HoodieT // TODO : check if maxMemory is not greater than JVM or spark.executor memory // TODO - rollback any compactions in flight HoodieTableMetaClient metaClient = hoodieTable.getMetaClient(); - log.info("Compacting " + metaClient.getBasePath() + " with commit " + compactionCommitTime); + LOG.info("Compacting " + metaClient.getBasePath() + " with commit " + compactionCommitTime); List partitionPaths = FSUtils.getAllPartitionPaths(metaClient.getFs(), metaClient.getBasePath(), config.shouldAssumeDatePartitioning()); @@ -189,7 +189,7 @@ public HoodieCompactionPlan generateCompactionPlan(JavaSparkContext jsc, HoodieT } RealtimeView fileSystemView = hoodieTable.getRTFileSystemView(); - log.info("Compaction looking for files to compact in " + partitionPaths + " partitions"); + LOG.info("Compaction looking for files to compact in " + partitionPaths + " partitions"); List operations = jsc.parallelize(partitionPaths, partitionPaths.size()) .flatMap((FlatMapFunction) partitionPath -> fileSystemView .getLatestFileSlices(partitionPath) @@ -206,10 +206,10 @@ public HoodieCompactionPlan generateCompactionPlan(JavaSparkContext jsc, HoodieT config.getCompactionStrategy().captureMetrics(config, dataFile, partitionPath, logFiles)); }).filter(c -> !c.getDeltaFileNames().isEmpty()).collect(toList()).iterator()) .collect().stream().map(CompactionUtils::buildHoodieCompactionOperation).collect(toList()); - log.info("Total of " + operations.size() + " compactions are retrieved"); - log.info("Total number of latest files slices " + totalFileSlices.value()); - log.info("Total number of log files " + totalLogFiles.value()); - log.info("Total number of file slices " + totalFileSlices.value()); + LOG.info("Total of " + operations.size() + " compactions are retrieved"); + LOG.info("Total number of latest files slices " + totalFileSlices.value()); + LOG.info("Total number of log files " + totalLogFiles.value()); + LOG.info("Total number of file slices " + totalFileSlices.value()); // Filter the compactions with the passed in filter. 
This lets us choose most effective // compactions only HoodieCompactionPlan compactionPlan = config.getCompactionStrategy().generateCompactionPlan(config, operations, @@ -221,7 +221,7 @@ public HoodieCompactionPlan generateCompactionPlan(JavaSparkContext jsc, HoodieT + "Please fix your strategy implementation." + "FileIdsWithPendingCompactions :" + fgIdsInPendingCompactions + ", Selected workload :" + compactionPlan); if (compactionPlan.getOperations().isEmpty()) { - log.warn("After filtering, Nothing to compact for " + metaClient.getBasePath()); + LOG.warn("After filtering, Nothing to compact for " + metaClient.getBasePath()); } return compactionPlan; } diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java b/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java index d0f61f218fd70..0cfd5c62b38d7 100644 --- a/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java +++ b/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java @@ -32,7 +32,7 @@ */ public class HoodieMetrics { - private static Logger logger = LogManager.getLogger(HoodieMetrics.class); + private static final Logger LOG = LogManager.getLogger(HoodieMetrics.class); // Some timers public String rollbackTimerName = null; public String cleanTimerName = null; @@ -155,7 +155,7 @@ public void updateCommitMetrics(long commitEpochTimeInMs, long durationInMs, Hoo public void updateRollbackMetrics(long durationInMs, long numFilesDeleted) { if (config.isMetricsOn()) { - logger.info( + LOG.info( String.format("Sending rollback metrics (duration=%d, numFilesDeleted=%d)", durationInMs, numFilesDeleted)); Metrics.registerGauge(getMetricsName("rollback", "duration"), durationInMs); Metrics.registerGauge(getMetricsName("rollback", "numFilesDeleted"), numFilesDeleted); @@ -164,7 +164,7 @@ public void updateRollbackMetrics(long durationInMs, long numFilesDeleted) { public void updateCleanMetrics(long durationInMs, int numFilesDeleted) { if (config.isMetricsOn()) { - logger.info( + LOG.info( String.format("Sending clean metrics (duration=%d, numFilesDeleted=%d)", durationInMs, numFilesDeleted)); Metrics.registerGauge(getMetricsName("clean", "duration"), durationInMs); Metrics.registerGauge(getMetricsName("clean", "numFilesDeleted"), numFilesDeleted); @@ -173,7 +173,7 @@ public void updateCleanMetrics(long durationInMs, int numFilesDeleted) { public void updateFinalizeWriteMetrics(long durationInMs, long numFilesFinalized) { if (config.isMetricsOn()) { - logger.info(String.format("Sending finalize write metrics (duration=%d, numFilesFinalized=%d)", durationInMs, + LOG.info(String.format("Sending finalize write metrics (duration=%d, numFilesFinalized=%d)", durationInMs, numFilesFinalized)); Metrics.registerGauge(getMetricsName("finalize", "duration"), durationInMs); Metrics.registerGauge(getMetricsName("finalize", "numFilesFinalized"), numFilesFinalized); @@ -182,7 +182,7 @@ public void updateFinalizeWriteMetrics(long durationInMs, long numFilesFinalized public void updateIndexMetrics(final String action, final long durationInMs) { if (config.isMetricsOn()) { - logger.info(String.format("Sending index metrics (%s.duration, %d)", action, durationInMs)); + LOG.info(String.format("Sending index metrics (%s.duration, %d)", action, durationInMs)); Metrics.registerGauge(getMetricsName("index", String.format("%s.duration", action)), durationInMs); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java 
b/hudi-client/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java index d00ec677df540..98a6b30fccc31 100644 --- a/hudi-client/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java +++ b/hudi-client/src/main/java/org/apache/hudi/metrics/JmxMetricsReporter.java @@ -38,7 +38,7 @@ */ public class JmxMetricsReporter extends MetricsReporter { - private static Logger logger = LogManager.getLogger(JmxMetricsReporter.class); + private static final Logger LOG = LogManager.getLogger(JmxMetricsReporter.class); private final JMXConnectorServer connector; private String host; private int port; @@ -61,7 +61,7 @@ public JmxMetricsReporter(HoodieWriteConfig config) { .newJMXConnectorServer(url, null, ManagementFactory.getPlatformMBeanServer()); } catch (Exception e) { String msg = "Jmx initialize failed: "; - logger.error(msg, e); + LOG.error(msg, e); throw new HoodieException(msg, e); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/Metrics.java b/hudi-client/src/main/java/org/apache/hudi/metrics/Metrics.java index df9d9cc6c6ef4..4b194416a3578 100644 --- a/hudi-client/src/main/java/org/apache/hudi/metrics/Metrics.java +++ b/hudi-client/src/main/java/org/apache/hudi/metrics/Metrics.java @@ -33,7 +33,7 @@ * This is the main class of the metrics system. */ public class Metrics { - private static Logger logger = LogManager.getLogger(Metrics.class); + private static final Logger LOG = LogManager.getLogger(Metrics.class); private static volatile boolean initialized = false; private static Metrics metrics = null; @@ -87,7 +87,7 @@ public static void registerGauge(String metricName, final long value) { // Here we catch all exception, so the major upsert pipeline will not be affected if the // metrics system // has some issues. - logger.error("Failed to send metrics: ", e); + LOG.error("Failed to send metrics: ", e); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java b/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java index 171174a4914fe..aac6c708f54dc 100644 --- a/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java +++ b/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java @@ -36,7 +36,7 @@ */ public class MetricsGraphiteReporter extends MetricsReporter { - private static Logger logger = LogManager.getLogger(MetricsGraphiteReporter.class); + private static final Logger LOG = LogManager.getLogger(MetricsGraphiteReporter.class); private final MetricRegistry registry; private final GraphiteReporter graphiteReporter; private final HoodieWriteConfig config; @@ -63,7 +63,7 @@ public void start() { if (graphiteReporter != null) { graphiteReporter.start(30, TimeUnit.SECONDS); } else { - logger.error("Cannot start as the graphiteReporter is null."); + LOG.error("Cannot start as the graphiteReporter is null."); } } @@ -72,7 +72,7 @@ public void report() { if (graphiteReporter != null) { graphiteReporter.report(); } else { - logger.error("Cannot report metrics as the graphiteReporter is null."); + LOG.error("Cannot report metrics as the graphiteReporter is null."); } } diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java b/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java index 962abd4964645..b9d433d9457f3 100644 --- a/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java +++ b/hudi-client/src/main/java/org/apache/hudi/metrics/MetricsReporterFactory.java @@ -29,7 +29,7 @@ */ 
public class MetricsReporterFactory { - private static Logger logger = LogManager.getLogger(MetricsReporterFactory.class); + private static final Logger LOG = LogManager.getLogger(MetricsReporterFactory.class); public static MetricsReporter createReporter(HoodieWriteConfig config, MetricRegistry registry) { MetricsReporterType type = config.getMetricsReporterType(); @@ -45,7 +45,7 @@ public static MetricsReporter createReporter(HoodieWriteConfig config, MetricReg reporter = new JmxMetricsReporter(config); break; default: - logger.error("Reporter type[" + type + "] is not supported."); + LOG.error("Reporter type[" + type + "] is not supported."); break; } return reporter; diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java index b6c8b6f4f124f..016d3ceae11f7 100644 --- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java +++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java @@ -92,7 +92,7 @@ */ public class HoodieCopyOnWriteTable extends HoodieTable { - private static Logger logger = LogManager.getLogger(HoodieCopyOnWriteTable.class); + private static final Logger LOG = LogManager.getLogger(HoodieCopyOnWriteTable.class); public HoodieCopyOnWriteTable(HoodieWriteConfig config, JavaSparkContext jsc) { super(config, jsc); @@ -125,11 +125,11 @@ private static PairFlatMapFunction>, String, Par private static Boolean deleteFileAndGetResult(FileSystem fs, String deletePathStr) throws IOException { Path deletePath = new Path(deletePathStr); - logger.debug("Working on delete path :" + deletePath); + LOG.debug("Working on delete path :" + deletePath); try { boolean deleteResult = fs.delete(deletePath, false); if (deleteResult) { - logger.debug("Cleaned file at path :" + deletePath); + LOG.debug("Cleaned file at path :" + deletePath); } return deleteResult; } catch (FileNotFoundException fio) { @@ -171,7 +171,7 @@ public Iterator> handleUpdate(String commitTime, String fileId throws IOException { // This is needed since sometimes some buckets are never picked in getPartition() and end up with 0 records if (!recordItr.hasNext()) { - logger.info("Empty partition with fileId => " + fileId); + LOG.info("Empty partition with fileId => " + fileId); return Collections.singletonList((List) Collections.EMPTY_LIST).iterator(); } // these are updates @@ -211,7 +211,7 @@ protected Iterator> handleUpdateInternal(HoodieMergeHandle ups // TODO(vc): This needs to be revisited if (upsertHandle.getWriteStatus().getPartitionPath() == null) { - logger.info("Upsert Handle has partition path as null " + upsertHandle.getOldFilePath() + ", " + LOG.info("Upsert Handle has partition path as null " + upsertHandle.getOldFilePath() + ", " + upsertHandle.getWriteStatus()); } return Collections.singletonList(Collections.singletonList(upsertHandle.getWriteStatus())).iterator(); @@ -230,7 +230,7 @@ public Iterator> handleInsert(String commitTime, String idPfx, throws Exception { // This is needed since sometimes some buckets are never picked in getPartition() and end up with 0 records if (!recordItr.hasNext()) { - logger.info("Empty partition"); + LOG.info("Empty partition"); return Collections.singletonList((List) Collections.EMPTY_LIST).iterator(); } return new CopyOnWriteLazyInsertIterable<>(recordItr, config, commitTime, this, idPfx); @@ -261,7 +261,7 @@ public Iterator> handleUpsertPartition(String commitTime, Inte } } catch (Throwable t) { String msg = "Error 
upserting bucketType " + btype + " for partition :" + partition; - logger.error(msg, t); + LOG.error(msg, t); throw new HoodieUpsertException(msg, t); } } @@ -286,13 +286,13 @@ public HoodieCleanerPlan scheduleClean(JavaSparkContext jsc) { List partitionsToClean = cleaner.getPartitionPathsToClean(earliestInstant); if (partitionsToClean.isEmpty()) { - logger.info("Nothing to clean here. It is already clean"); + LOG.info("Nothing to clean here. It is already clean"); return HoodieCleanerPlan.newBuilder().setPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS.name()).build(); } - logger.info( + LOG.info( "Total Partitions to clean : " + partitionsToClean.size() + ", with policy " + config.getCleanerPolicy()); int cleanerParallelism = Math.min(partitionsToClean.size(), config.getCleanerParallelism()); - logger.info("Using cleanerParallelism: " + cleanerParallelism); + LOG.info("Using cleanerParallelism: " + cleanerParallelism); Map> cleanOps = jsc.parallelize(partitionsToClean, cleanerParallelism) .map(partitionPathToClean -> Pair.of(partitionPathToClean, cleaner.getDeletePaths(partitionPathToClean))) @@ -320,7 +320,7 @@ public List clean(JavaSparkContext jsc, HoodieInstant cleanInst int cleanerParallelism = Math.min( (int) (cleanerPlan.getFilesToBeDeletedPerPartition().values().stream().mapToInt(x -> x.size()).count()), config.getCleanerParallelism()); - logger.info("Using cleanerParallelism: " + cleanerParallelism); + LOG.info("Using cleanerParallelism: " + cleanerParallelism); List> partitionCleanStats = jsc .parallelize(cleanerPlan.getFilesToBeDeletedPerPartition().entrySet().stream() .flatMap(x -> x.getValue().stream().map(y -> new Tuple2(x.getKey(), y))) @@ -360,7 +360,7 @@ public List rollback(JavaSparkContext jsc, String commit, bo this.getInflightCommitTimeline().getInstants().map(HoodieInstant::getTimestamp).collect(Collectors.toList()); // Atomically unpublish the commits if (!inflights.contains(commit)) { - logger.info("Unpublishing " + commit); + LOG.info("Unpublishing " + commit); activeTimeline.revertToInflight(new HoodieInstant(false, actionType, commit)); } @@ -368,7 +368,7 @@ public List rollback(JavaSparkContext jsc, String commit, bo Long startTime = System.currentTimeMillis(); // delete all the data files for this commit - logger.info("Clean out all parquet files generated for commit: " + commit); + LOG.info("Clean out all parquet files generated for commit: " + commit); List rollbackRequests = generateRollbackRequests(instantToRollback); // TODO: We need to persist this as rollback workload and use it in case of partial failures @@ -377,7 +377,7 @@ public List rollback(JavaSparkContext jsc, String commit, bo // Delete Inflight instant if enabled deleteInflightInstant(deleteInstants, activeTimeline, new HoodieInstant(true, actionType, commit)); - logger.info("Time(in ms) taken to finish rollback " + (System.currentTimeMillis() - startTime)); + LOG.info("Time(in ms) taken to finish rollback " + (System.currentTimeMillis() - startTime)); return stats; } @@ -403,9 +403,9 @@ protected void deleteInflightInstant(boolean deleteInstant, HoodieActiveTimeline // Remove the rolled back inflight commits if (deleteInstant) { activeTimeline.deleteInflight(instantToBeDeleted); - logger.info("Deleted inflight commit " + instantToBeDeleted); + LOG.info("Deleted inflight commit " + instantToBeDeleted); } else { - logger.warn("Rollback finished without deleting inflight instant file. Instant=" + instantToBeDeleted); + LOG.warn("Rollback finished without deleting inflight instant file. 
Instant=" + instantToBeDeleted); } } @@ -574,7 +574,7 @@ class UpsertPartitioner extends Partitioner { assignUpdates(profile); assignInserts(profile); - logger.info("Total Buckets :" + totalBuckets + ", " + "buckets info => " + bucketInfoMap + ", \n" + LOG.info("Total Buckets :" + totalBuckets + ", " + "buckets info => " + bucketInfoMap + ", \n" + "Partition to insert buckets => " + partitionPathToInsertBuckets + ", \n" + "UpdateLocations mapped to buckets =>" + updateLocationToBucket); } @@ -604,13 +604,13 @@ private void assignInserts(WorkloadProfile profile) { long averageRecordSize = averageBytesPerRecord(metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants(), config.getCopyOnWriteRecordSizeEstimate()); - logger.info("AvgRecordSize => " + averageRecordSize); + LOG.info("AvgRecordSize => " + averageRecordSize); for (String partitionPath : partitionPaths) { WorkloadStat pStat = profile.getWorkloadStat(partitionPath); if (pStat.getNumInserts() > 0) { List smallFiles = getSmallFiles(partitionPath); - logger.info("For partitionPath : " + partitionPath + " Small Files => " + smallFiles); + LOG.info("For partitionPath : " + partitionPath + " Small Files => " + smallFiles); long totalUnassignedInserts = pStat.getNumInserts(); List bucketNumbers = new ArrayList<>(); @@ -625,10 +625,10 @@ private void assignInserts(WorkloadProfile profile) { int bucket; if (updateLocationToBucket.containsKey(smallFile.location.getFileId())) { bucket = updateLocationToBucket.get(smallFile.location.getFileId()); - logger.info("Assigning " + recordsToAppend + " inserts to existing update bucket " + bucket); + LOG.info("Assigning " + recordsToAppend + " inserts to existing update bucket " + bucket); } else { bucket = addUpdateBucket(smallFile.location.getFileId()); - logger.info("Assigning " + recordsToAppend + " inserts to new update bucket " + bucket); + LOG.info("Assigning " + recordsToAppend + " inserts to new update bucket " + bucket); } bucketNumbers.add(bucket); recordsPerBucket.add(recordsToAppend); @@ -644,7 +644,7 @@ private void assignInserts(WorkloadProfile profile) { } int insertBuckets = (int) Math.ceil((1.0 * totalUnassignedInserts) / insertRecordsPerBucket); - logger.info("After small file assignment: unassignedInserts => " + totalUnassignedInserts + LOG.info("After small file assignment: unassignedInserts => " + totalUnassignedInserts + ", totalInsertBuckets => " + insertBuckets + ", recordsPerBucket => " + insertRecordsPerBucket); for (int b = 0; b < insertBuckets; b++) { bucketNumbers.add(totalBuckets); @@ -665,7 +665,7 @@ private void assignInserts(WorkloadProfile profile) { bkt.weight = (1.0 * recordsPerBucket.get(i)) / pStat.getNumInserts(); insertBuckets.add(bkt); } - logger.info("Total insert buckets for partition path " + partitionPath + " => " + insertBuckets); + LOG.info("Total insert buckets for partition path " + partitionPath + " => " + insertBuckets); partitionPathToInsertBuckets.put(partitionPath, insertBuckets); } } @@ -770,7 +770,7 @@ protected static long averageBytesPerRecord(HoodieTimeline commitTimeline, int d } } catch (Throwable t) { // make this fail safe. 
- logger.error("Error trying to compute average bytes/record ", t); + LOG.error("Error trying to compute average bytes/record ", t); } return avgSize; } diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java index da29d9800a0b0..b3fb47e5eca46 100644 --- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java +++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java @@ -78,7 +78,7 @@ */ public class HoodieMergeOnReadTable extends HoodieCopyOnWriteTable { - private static Logger logger = LogManager.getLogger(HoodieMergeOnReadTable.class); + private static final Logger LOG = LogManager.getLogger(HoodieMergeOnReadTable.class); // UpsertPartitioner for MergeOnRead table type private MergeOnReadUpsertPartitioner mergeOnReadUpsertPartitioner; @@ -99,10 +99,10 @@ public Partitioner getUpsertPartitioner(WorkloadProfile profile) { @Override public Iterator> handleUpdate(String commitTime, String fileId, Iterator> recordItr) throws IOException { - logger.info("Merging updates for commit " + commitTime + " for file " + fileId); + LOG.info("Merging updates for commit " + commitTime + " for file " + fileId); if (!index.canIndexLogFiles() && mergeOnReadUpsertPartitioner.getSmallFileIds().contains(fileId)) { - logger.info("Small file corrections for updates for commit " + commitTime + " for file " + fileId); + LOG.info("Small file corrections for updates for commit " + commitTime + " for file " + fileId); return super.handleUpdate(commitTime, fileId, recordItr); } else { HoodieAppendHandle appendHandle = new HoodieAppendHandle<>(config, commitTime, this, fileId, recordItr); @@ -125,7 +125,7 @@ public Iterator> handleInsert(String commitTime, String idPfx, @Override public HoodieCompactionPlan scheduleCompaction(JavaSparkContext jsc, String instantTime) { - logger.info("Checking if compaction needs to be run on " + config.getBasePath()); + LOG.info("Checking if compaction needs to be run on " + config.getBasePath()); Option lastCompaction = getActiveTimeline().getCommitTimeline().filterCompletedInstants().lastInstant(); String deltaCommitsSinceTs = "0"; @@ -136,13 +136,13 @@ public HoodieCompactionPlan scheduleCompaction(JavaSparkContext jsc, String inst int deltaCommitsSinceLastCompaction = getActiveTimeline().getDeltaCommitTimeline() .findInstantsAfter(deltaCommitsSinceTs, Integer.MAX_VALUE).countInstants(); if (config.getInlineCompactDeltaCommitMax() > deltaCommitsSinceLastCompaction) { - logger.info("Not running compaction as only " + deltaCommitsSinceLastCompaction + LOG.info("Not running compaction as only " + deltaCommitsSinceLastCompaction + " delta commits was found since last compaction " + deltaCommitsSinceTs + ". 
Waiting for " + config.getInlineCompactDeltaCommitMax()); return new HoodieCompactionPlan(); } - logger.info("Compacting merge on read table " + config.getBasePath()); + LOG.info("Compacting merge on read table " + config.getBasePath()); HoodieRealtimeTableCompactor compactor = new HoodieRealtimeTableCompactor(); try { return compactor.generateCompactionPlan(jsc, this, config, instantTime, @@ -184,7 +184,7 @@ public List rollback(JavaSparkContext jsc, String commit, bo if (!instantToRollback.isInflight()) { this.getActiveTimeline().revertToInflight(instantToRollback); } - logger.info("Unpublished " + commit); + LOG.info("Unpublished " + commit); Long startTime = System.currentTimeMillis(); List rollbackRequests = generateRollbackRequests(jsc, instantToRollback); // TODO: We need to persist this as rollback workload and use it in case of partial failures @@ -194,7 +194,7 @@ public List rollback(JavaSparkContext jsc, String commit, bo deleteInflightInstant(deleteInstants, this.getActiveTimeline(), new HoodieInstant(true, instantToRollback.getAction(), instantToRollback.getTimestamp())); - logger.info("Time(in ms) taken to finish rollback " + (System.currentTimeMillis() - startTime)); + LOG.info("Time(in ms) taken to finish rollback " + (System.currentTimeMillis() - startTime)); return allRollbackStats; } @@ -219,7 +219,7 @@ private List generateRollbackRequests(JavaSparkContext jsc, Hoo List partitionRollbackRequests = new ArrayList<>(); switch (instantToRollback.getAction()) { case HoodieTimeline.COMMIT_ACTION: - logger.info( + LOG.info( "Rolling back commit action. There are higher delta commits. So only rolling back this " + "instant"); partitionRollbackRequests.add( RollbackRequest.createRollbackRequestWithDeleteDataAndLogFilesAction(partitionPath, instantToRollback)); @@ -235,14 +235,14 @@ private List generateRollbackRequests(JavaSparkContext jsc, Hoo // and has not yet finished. In this scenario we should delete only the newly created parquet files // and not corresponding base commit log files created with this as baseCommit since updates would // have been written to the log files. - logger.info("Rolling back compaction. There are higher delta commits. So only deleting data files"); + LOG.info("Rolling back compaction. There are higher delta commits. So only deleting data files"); partitionRollbackRequests.add( RollbackRequest.createRollbackRequestWithDeleteDataFilesOnlyAction(partitionPath, instantToRollback)); } else { // No deltacommits present after this compaction commit (inflight or requested). In this case, we // can also delete any log files that were created with this compaction commit as base // commit. - logger.info("Rolling back compaction plan. There are NO higher delta commits. So deleting both data and" + LOG.info("Rolling back compaction plan. There are NO higher delta commits. 
So deleting both data and" + " log files"); partitionRollbackRequests.add( RollbackRequest.createRollbackRequestWithDeleteDataAndLogFilesAction(partitionPath, instantToRollback)); diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java index b7f485de3562b..396d57b01a365 100644 --- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java +++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieTable.java @@ -73,7 +73,7 @@ */ public abstract class HoodieTable implements Serializable { - private static Logger logger = LogManager.getLogger(HoodieTable.class); + private static final Logger LOG = LogManager.getLogger(HoodieTable.class); protected final HoodieWriteConfig config; protected final HoodieTableMetaClient metaClient; @@ -322,7 +322,7 @@ protected void deleteMarkerDir(String instantTs) { Path markerDir = new Path(metaClient.getMarkerFolderPath(instantTs)); if (fs.exists(markerDir)) { // For append only case, we do not write to marker dir. Hence, the above check - logger.info("Removing marker directory=" + markerDir); + LOG.info("Removing marker directory=" + markerDir); fs.delete(markerDir, true); } } catch (IOException ioe) { @@ -360,7 +360,7 @@ protected void cleanFailedWrites(JavaSparkContext jsc, String instantTs, List deleteCleanedFiles(HoodieTableMetaClient metaClient, HoodieWriteConfig config, Map results, String partitionPath, PathFilter filter) throws IOException { - logger.info("Cleaning path " + partitionPath); + LOG.info("Cleaning path " + partitionPath); FileSystem fs = metaClient.getFs(); FileStatus[] toBeDeleted = fs.listStatus(FSUtils.getPartitionPath(config.getBasePath(), partitionPath), filter); for (FileStatus file : toBeDeleted) { boolean success = fs.delete(file.getPath(), false); results.put(file, success); - logger.info("Delete file " + file.getPath() + "\t" + success); + LOG.info("Delete file " + file.getPath() + "\t" + success); } return results; } @@ -197,7 +197,7 @@ private Map deleteCleanedFiles(HoodieTableMetaClient metaCl */ private Map deleteCleanedFiles(HoodieTableMetaClient metaClient, HoodieWriteConfig config, Map results, String commit, String partitionPath) throws IOException { - logger.info("Cleaning path " + partitionPath); + LOG.info("Cleaning path " + partitionPath); FileSystem fs = metaClient.getFs(); PathFilter filter = (path) -> { if (path.toString().contains(".parquet")) { @@ -210,7 +210,7 @@ private Map deleteCleanedFiles(HoodieTableMetaClient metaCl for (FileStatus file : toBeDeleted) { boolean success = fs.delete(file.getPath(), false); results.put(file, success); - logger.info("Delete file " + file.getPath() + "\t" + success); + LOG.info("Delete file " + file.getPath() + "\t" + success); } return results; } diff --git a/hudi-client/src/test/java/HoodieClientExample.java b/hudi-client/src/test/java/HoodieClientExample.java index c483ca79f683c..38bd8a7e9aea3 100644 --- a/hudi-client/src/test/java/HoodieClientExample.java +++ b/hudi-client/src/test/java/HoodieClientExample.java @@ -51,7 +51,7 @@ */ public class HoodieClientExample { - private static Logger logger = LogManager.getLogger(HoodieClientExample.class); + private static final Logger LOG = LogManager.getLogger(HoodieClientExample.class); @Parameter(names = {"--help", "-h"}, help = true) public Boolean help = false; @Parameter(names = {"--table-path", "-p"}, description = "path for Hoodie sample table") @@ -103,7 +103,7 @@ public void run() throws Exception { * Write 1 (only inserts) 
*/ String newCommitTime = client.startCommit(); - logger.info("Starting commit " + newCommitTime); + LOG.info("Starting commit " + newCommitTime); List records = dataGen.generateInserts(newCommitTime, 100); recordsSoFar.addAll(records); @@ -114,7 +114,7 @@ public void run() throws Exception { * Write 2 (updates) */ newCommitTime = client.startCommit(); - logger.info("Starting commit " + newCommitTime); + LOG.info("Starting commit " + newCommitTime); List toBeUpdated = dataGen.generateUpdates(newCommitTime, 100); records.addAll(toBeUpdated); recordsSoFar.addAll(toBeUpdated); @@ -125,7 +125,7 @@ public void run() throws Exception { * Delete 1 */ newCommitTime = client.startCommit(); - logger.info("Starting commit " + newCommitTime); + LOG.info("Starting commit " + newCommitTime); List toBeDeleted = HoodieClientTestUtils .getKeysToDelete(HoodieClientTestUtils.getHoodieKeys(recordsSoFar), 10); JavaRDD deleteRecords = jsc.parallelize(toBeDeleted, 1); diff --git a/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java b/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java index 57d5fc022b66d..f7e40e7f321a3 100644 --- a/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java +++ b/hudi-client/src/test/java/org/apache/hudi/TestCleaner.java @@ -92,7 +92,7 @@ public class TestCleaner extends TestHoodieClientBase { private static final int BIG_BATCH_INSERT_SIZE = 500; - private static Logger logger = LogManager.getLogger(TestHoodieClientBase.class); + private static final Logger LOG = LogManager.getLogger(TestHoodieClientBase.class); /** * Helper method to do first batch of insert for clean by versions/commits tests. @@ -393,7 +393,7 @@ private void testInsertAndCleanByCommits( for (HoodieFileGroup fileGroup : fileGroups) { Set commitTimes = new HashSet<>(); fileGroup.getAllDataFiles().forEach(value -> { - logger.debug("Data File - " + value); + LOG.debug("Data File - " + value); commitTimes.add(value.getCommitTime()); }); assertEquals("Only contain acceptable versions of file should be present", diff --git a/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java b/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java index 1ddc0fd764982..dbdccb12ac8f1 100644 --- a/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java +++ b/hudi-client/src/test/java/org/apache/hudi/TestCompactionAdminClient.java @@ -31,6 +31,8 @@ import org.apache.hudi.exception.HoodieException; import org.apache.hudi.exception.HoodieIOException; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -48,6 +50,8 @@ public class TestCompactionAdminClient extends TestHoodieClientBase { + private static final Logger LOG = LogManager.getLogger(TestCompactionAdminClient.class); + private HoodieTableMetaClient metaClient; private CompactionAdminClient client; @@ -151,7 +155,7 @@ private void validateRepair(String ingestionInstant, String compactionInstant, i Assert.assertTrue("Rename Files must be empty", renameFiles.isEmpty()); } expRenameFiles.entrySet().stream().forEach(r -> { - logger.info("Key :" + r.getKey() + " renamed to " + r.getValue() + " rolled back to " + LOG.info("Key :" + r.getKey() + " renamed to " + r.getValue() + " rolled back to " + renameFilesFromUndo.get(r.getKey())); }); diff --git a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java index d35a1e5a3aa19..0b3b88a55ae8f 100644 
--- a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java +++ b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientBase.java @@ -70,7 +70,7 @@ */ public class TestHoodieClientBase extends HoodieClientTestHarness { - protected static Logger logger = LogManager.getLogger(TestHoodieClientBase.class); + private static final Logger LOG = LogManager.getLogger(TestHoodieClientBase.class); @Before public void setUp() throws Exception { diff --git a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java index 7cf2da7c615f8..4a48e1169acc8 100644 --- a/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java +++ b/hudi-client/src/test/java/org/apache/hudi/TestHoodieClientOnCopyOnWriteStorage.java @@ -47,6 +47,8 @@ import org.apache.avro.generic.GenericRecord; import org.apache.hadoop.fs.Path; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.apache.spark.api.java.JavaRDD; import org.junit.Assert; import org.junit.Test; @@ -76,6 +78,8 @@ @SuppressWarnings("unchecked") public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase { + private static final Logger LOG = LogManager.getLogger(TestHoodieClientOnCopyOnWriteStorage.class); + /** * Test Auto Commit behavior for HoodieWriteClient insert API. */ @@ -867,7 +871,7 @@ private Pair> testConsistencyCheck(HoodieTableMetaCli Path markerFilePath = new Path(String.format("%s/%s", partitionPath, FSUtils.makeMarkerFile(commitTime, "1-0-1", UUID.randomUUID().toString()))); metaClient.getFs().create(markerFilePath); - logger.info("Created a dummy marker path=" + markerFilePath); + LOG.info("Created a dummy marker path=" + markerFilePath); try { client.commit(commitTime, result); diff --git a/hudi-client/src/test/java/org/apache/hudi/TestMultiFS.java b/hudi-client/src/test/java/org/apache/hudi/TestMultiFS.java index 4134832375311..eee7c0981dd12 100644 --- a/hudi-client/src/test/java/org/apache/hudi/TestMultiFS.java +++ b/hudi-client/src/test/java/org/apache/hudi/TestMultiFS.java @@ -48,7 +48,7 @@ public class TestMultiFS extends HoodieClientTestHarness { - private static Logger logger = LogManager.getLogger(TestMultiFS.class); + private static final Logger LOG = LogManager.getLogger(TestMultiFS.class); private String tablePath = "file:///tmp/hoodie/sample-table"; protected String tableName = "hoodie_rt"; private String tableType = HoodieTableType.COPY_ON_WRITE.name(); @@ -92,7 +92,7 @@ public void readLocalWriteHDFS() throws Exception { // Write generated data to hdfs (only inserts) String readCommitTime = hdfsWriteClient.startCommit(); - logger.info("Starting commit " + readCommitTime); + LOG.info("Starting commit " + readCommitTime); List records = dataGen.generateInserts(readCommitTime, 100); JavaRDD writeRecords = jsc.parallelize(records, 1); hdfsWriteClient.upsert(writeRecords, readCommitTime); @@ -109,13 +109,13 @@ public void readLocalWriteHDFS() throws Exception { tableName, HoodieAvroPayload.class.getName()); String writeCommitTime = localWriteClient.startCommit(); - logger.info("Starting write commit " + writeCommitTime); + LOG.info("Starting write commit " + writeCommitTime); List localRecords = dataGen.generateInserts(writeCommitTime, 100); JavaRDD localWriteRecords = jsc.parallelize(localRecords, 1); - logger.info("Writing to path: " + tablePath); + LOG.info("Writing to path: " + tablePath); localWriteClient.upsert(localWriteRecords, 
writeCommitTime); - logger.info("Reading from path: " + tablePath); + LOG.info("Reading from path: " + tablePath); fs = FSUtils.getFs(tablePath, HoodieTestUtils.getDefaultHadoopConf()); metaClient = new HoodieTableMetaClient(fs.getConf(), tablePath); timeline = new HoodieActiveTimeline(metaClient).getCommitTimeline(); diff --git a/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java b/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java index 9c26ffcd2fc89..f80cb7b938223 100644 --- a/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java +++ b/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestUtils.java @@ -72,7 +72,7 @@ */ public class HoodieClientTestUtils { - private static final transient Logger LOG = LogManager.getLogger(HoodieClientTestUtils.class); + private static final Logger LOG = LogManager.getLogger(HoodieClientTestUtils.class); private static final Random RANDOM = new Random(); public static List collectStatuses(Iterator> statusListItr) { diff --git a/hudi-client/src/test/java/org/apache/hudi/table/TestCopyOnWriteTable.java b/hudi-client/src/test/java/org/apache/hudi/table/TestCopyOnWriteTable.java index 686a724015157..ddddc0c08c004 100644 --- a/hudi-client/src/test/java/org/apache/hudi/table/TestCopyOnWriteTable.java +++ b/hudi-client/src/test/java/org/apache/hudi/table/TestCopyOnWriteTable.java @@ -70,7 +70,7 @@ public class TestCopyOnWriteTable extends HoodieClientTestHarness { - protected static Logger log = LogManager.getLogger(TestCopyOnWriteTable.class); + private static final Logger LOG = LogManager.getLogger(TestCopyOnWriteTable.class); @Before public void setUp() throws Exception { @@ -382,7 +382,7 @@ public void testFileSizeUpsertRecords() throws Exception { int counts = 0; for (File file : new File(basePath + "/2016/01/31").listFiles()) { if (file.getName().endsWith(".parquet") && FSUtils.getCommitTime(file.getName()).equals(commitTime)) { - log.info(file.getName() + "-" + file.length()); + LOG.info(file.getName() + "-" + file.length()); counts++; } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java index c25bf49cbc7e8..fa47dd08cc91f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java @@ -43,7 +43,7 @@ public class HoodieCommitMetadata implements Serializable { public static final String SCHEMA_KEY = "schema"; - private static volatile Logger log = LogManager.getLogger(HoodieCommitMetadata.class); + private static final Logger LOG = LogManager.getLogger(HoodieCommitMetadata.class); protected Map> partitionToWriteStats; protected Boolean compacted; @@ -118,7 +118,7 @@ public HashMap getFileIdAndFullPaths(String basePath) { public String toJsonString() throws IOException { if (partitionToWriteStats.containsKey(null)) { - log.info("partition path is null for " + partitionToWriteStats.get(null)); + LOG.info("partition path is null for " + partitionToWriteStats.get(null)); partitionToWriteStats.remove(null); } return getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(this); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java index 19089523d220d..61ff1a14ccab7 100644 --- 
a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodiePartitionMetadata.java @@ -51,7 +51,7 @@ public class HoodiePartitionMetadata { private final FileSystem fs; - private static Logger log = LogManager.getLogger(HoodiePartitionMetadata.class); + private static final Logger LOG = LogManager.getLogger(HoodiePartitionMetadata.class); /** * Construct metadata from existing partition. @@ -101,7 +101,7 @@ public void trySave(int taskPartitionId) { fs.rename(tmpMetaPath, metaPath); } } catch (IOException ioe) { - log.warn("Error trying to save partition metadata (this is okay, as long as " + "atleast 1 of these succced), " + LOG.warn("Error trying to save partition metadata (this is okay, as long as " + "atleast 1 of these succced), " + partitionPath, ioe); } finally { if (!metafileExists) { @@ -111,7 +111,7 @@ public void trySave(int taskPartitionId) { fs.delete(tmpMetaPath, false); } } catch (IOException ioe) { - log.warn("Error trying to clean up temporary files for " + partitionPath, ioe); + LOG.warn("Error trying to clean up temporary files for " + partitionPath, ioe); } } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRollingStatMetadata.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRollingStatMetadata.java index 8b09f6fa766e2..bd1ef948dce8d 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRollingStatMetadata.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRollingStatMetadata.java @@ -31,7 +31,7 @@ */ public class HoodieRollingStatMetadata implements Serializable { - private static volatile Logger log = LogManager.getLogger(HoodieRollingStatMetadata.class); + private static final Logger LOG = LogManager.getLogger(HoodieRollingStatMetadata.class); protected Map> partitionToRollingStats; private String actionType = "DUMMY_ACTION"; public static final String ROLLING_STAT_METADATA_KEY = "ROLLING_STAT"; @@ -78,7 +78,7 @@ public static HoodieRollingStatMetadata fromBytes(byte[] bytes) throws IOExcepti public String toJsonString() throws IOException { if (partitionToRollingStats.containsKey(null)) { - log.info("partition path is null for " + partitionToRollingStats.get(null)); + LOG.info("partition path is null for " + partitionToRollingStats.get(null)); partitionToRollingStats.remove(null); } return HoodieCommitMetadata.getObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(this); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java index bc402c48868c8..1a4a2cfcc341e 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java @@ -47,7 +47,7 @@ */ public class HoodieTableConfig implements Serializable { - private static final transient Logger LOG = LogManager.getLogger(HoodieTableConfig.class); + private static final Logger LOG = LogManager.getLogger(HoodieTableConfig.class); public static final String HOODIE_PROPERTIES_FILE = "hoodie.properties"; public static final String HOODIE_TABLE_NAME_PROP_NAME = "hoodie.table.name"; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java index 91874d191a325..d109bcd5212ef 100644 --- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java @@ -64,7 +64,7 @@ */ public class HoodieTableMetaClient implements Serializable { - private static final transient Logger LOG = LogManager.getLogger(HoodieTableMetaClient.class); + private static final Logger LOG = LogManager.getLogger(HoodieTableMetaClient.class); public static String METAFOLDER_NAME = ".hoodie"; public static String TEMPFOLDER_NAME = METAFOLDER_NAME + File.separator + ".temp"; public static String AUXILIARYFOLDER_NAME = METAFOLDER_NAME + File.separator + ".aux"; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java index 16264e1e11338..23149d8106485 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java @@ -67,7 +67,7 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline { INFLIGHT_CLEAN_EXTENSION, REQUESTED_CLEAN_EXTENSION, INFLIGHT_COMPACTION_EXTENSION, REQUESTED_COMPACTION_EXTENSION, INFLIGHT_RESTORE_EXTENSION, RESTORE_EXTENSION})); - private static final transient Logger LOG = LogManager.getLogger(HoodieActiveTimeline.class); + private static final Logger LOG = LogManager.getLogger(HoodieActiveTimeline.class); protected HoodieTableMetaClient metaClient; private static AtomicReference lastInstantTime = new AtomicReference<>(String.valueOf(Integer.MIN_VALUE)); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java index 952af9fab3a5c..d26a88f213427 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java @@ -54,7 +54,7 @@ public class HoodieArchivedTimeline extends HoodieDefaultTimeline { private HoodieTableMetaClient metaClient; private Map readCommits = new HashMap<>(); - private static final transient Logger LOG = LogManager.getLogger(HoodieArchivedTimeline.class); + private static final Logger LOG = LogManager.getLogger(HoodieArchivedTimeline.class); public HoodieArchivedTimeline(HoodieTableMetaClient metaClient) { // Read back the commits to make sure diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java index 1a07cb5958582..b622835fb5343 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieDefaultTimeline.java @@ -47,7 +47,7 @@ */ public class HoodieDefaultTimeline implements HoodieTimeline { - private static final transient Logger LOG = LogManager.getLogger(HoodieDefaultTimeline.class); + private static final Logger LOG = LogManager.getLogger(HoodieDefaultTimeline.class); private static final String HASHING_ALGORITHM = "SHA-256"; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java index 
8f38b74aac4af..9beeea0c55e6f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java @@ -66,7 +66,7 @@ */ public abstract class AbstractTableFileSystemView implements SyncableFileSystemView, Serializable { - private static Logger log = LogManager.getLogger(AbstractTableFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(AbstractTableFileSystemView.class); protected HoodieTableMetaClient metaClient; @@ -123,7 +123,7 @@ protected List addFilesToView(FileStatus[] statuses) { } }); long storePartitionsTs = timer.endTimer(); - log.info("addFilesToView: NumFiles=" + statuses.length + ", FileGroupsCreationTime=" + fgBuildTimeTakenMs + LOG.info("addFilesToView: NumFiles=" + statuses.length + ", FileGroupsCreationTime=" + fgBuildTimeTakenMs + ", StoreTimeTaken=" + storePartitionsTs); return fileGroups; } @@ -216,7 +216,7 @@ private void ensurePartitionLoadedCorrectly(String partition) { if (!isPartitionAvailableInStore(partitionPathStr)) { // Not loaded yet try { - log.info("Building file system view for partition (" + partitionPathStr + ")"); + LOG.info("Building file system view for partition (" + partitionPathStr + ")"); // Create the path if it does not exist already Path partitionPath = FSUtils.getPartitionPath(metaClient.getBasePath(), partitionPathStr); @@ -224,7 +224,7 @@ private void ensurePartitionLoadedCorrectly(String partition) { long beginLsTs = System.currentTimeMillis(); FileStatus[] statuses = metaClient.getFs().listStatus(partitionPath); long endLsTs = System.currentTimeMillis(); - log.info("#files found in partition (" + partitionPathStr + ") =" + statuses.length + ", Time taken =" + LOG.info("#files found in partition (" + partitionPathStr + ") =" + statuses.length + ", Time taken =" + (endLsTs - beginLsTs)); List groups = addFilesToView(statuses); @@ -235,10 +235,10 @@ private void ensurePartitionLoadedCorrectly(String partition) { throw new HoodieIOException("Failed to list data files in partition " + partitionPathStr, e); } } else { - log.debug("View already built for Partition :" + partitionPathStr + ", FOUND is "); + LOG.debug("View already built for Partition :" + partitionPathStr + ", FOUND is "); } long endTs = System.currentTimeMillis(); - log.info("Time to load partition (" + partitionPathStr + ") =" + (endTs - beginTs)); + LOG.info("Time to load partition (" + partitionPathStr + ") =" + (endTs - beginTs)); return true; }); } @@ -289,7 +289,7 @@ protected boolean isDataFileDueToPendingCompaction(HoodieDataFile dataFile) { protected boolean isFileSliceAfterPendingCompaction(FileSlice fileSlice) { Option> compactionWithInstantTime = getPendingCompactionOperationWithInstant(fileSlice.getFileGroupId()); - log.info("Pending Compaction instant for (" + fileSlice + ") is :" + compactionWithInstantTime); + LOG.info("Pending Compaction instant for (" + fileSlice + ") is :" + compactionWithInstantTime); return (compactionWithInstantTime.isPresent()) && fileSlice.getBaseInstantTime().equals(compactionWithInstantTime.get().getKey()); } @@ -302,7 +302,7 @@ protected boolean isFileSliceAfterPendingCompaction(FileSlice fileSlice) { */ protected FileSlice filterDataFileAfterPendingCompaction(FileSlice fileSlice) { if (isFileSliceAfterPendingCompaction(fileSlice)) { - log.info("File Slice (" + fileSlice + ") is in pending compaction"); + LOG.info("File Slice (" + fileSlice + ") is in pending compaction"); // 
Data file is filtered out of the file-slice as the corresponding compaction // instant not completed yet. FileSlice transformed = diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewManager.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewManager.java index 4cb78c7f246b2..e00b2c754197f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewManager.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewManager.java @@ -51,7 +51,7 @@ * clients for querying. */ public class FileSystemViewManager { - private static Logger logger = LogManager.getLogger(FileSystemViewManager.class); + private static final Logger LOG = LogManager.getLogger(FileSystemViewManager.class); private final SerializableConfiguration conf; // The View Storage config used to store file-system views @@ -126,7 +126,7 @@ private static RocksDbBasedFileSystemView createRocksDBBasedFileSystemView(Seria */ private static SpillableMapBasedFileSystemView createSpillableMapBasedFileSystemView(SerializableConfiguration conf, FileSystemViewStorageConfig viewConf, String basePath) { - logger.info("Creating SpillableMap based view for basePath " + basePath); + LOG.info("Creating SpillableMap based view for basePath " + basePath); HoodieTableMetaClient metaClient = new HoodieTableMetaClient(conf.newCopy(), basePath, true); HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants(); return new SpillableMapBasedFileSystemView(metaClient, timeline, viewConf); @@ -142,7 +142,7 @@ private static SpillableMapBasedFileSystemView createSpillableMapBasedFileSystem */ private static HoodieTableFileSystemView createInMemoryFileSystemView(SerializableConfiguration conf, FileSystemViewStorageConfig viewConf, String basePath) { - logger.info("Creating InMemory based view for basePath " + basePath); + LOG.info("Creating InMemory based view for basePath " + basePath); HoodieTableMetaClient metaClient = new HoodieTableMetaClient(conf.newCopy(), basePath, true); HoodieTimeline timeline = metaClient.getActiveTimeline().filterCompletedAndCompactionInstants(); return new HoodieTableFileSystemView(metaClient, timeline, viewConf.isIncrementalTimelineSyncEnabled()); @@ -158,7 +158,7 @@ private static HoodieTableFileSystemView createInMemoryFileSystemView(Serializab */ private static RemoteHoodieTableFileSystemView createRemoteFileSystemView(SerializableConfiguration conf, FileSystemViewStorageConfig viewConf, HoodieTableMetaClient metaClient) { - logger.info("Creating remote view for basePath " + metaClient.getBasePath() + ". Server=" + LOG.info("Creating remote view for basePath " + metaClient.getBasePath() + ". 
Server=" + viewConf.getRemoteViewServerHost() + ":" + viewConf.getRemoteViewServerPort()); return new RemoteHoodieTableFileSystemView(viewConf.getRemoteViewServerHost(), viewConf.getRemoteViewServerPort(), metaClient); @@ -173,26 +173,26 @@ private static RemoteHoodieTableFileSystemView createRemoteFileSystemView(Serial */ public static FileSystemViewManager createViewManager(final SerializableConfiguration conf, final FileSystemViewStorageConfig config) { - logger.info("Creating View Manager with storage type :" + config.getStorageType()); + LOG.info("Creating View Manager with storage type :" + config.getStorageType()); switch (config.getStorageType()) { case EMBEDDED_KV_STORE: - logger.info("Creating embedded rocks-db based Table View"); + LOG.info("Creating embedded rocks-db based Table View"); return new FileSystemViewManager(conf, config, (basePath, viewConf) -> createRocksDBBasedFileSystemView(conf, viewConf, basePath)); case SPILLABLE_DISK: - logger.info("Creating Spillable Disk based Table View"); + LOG.info("Creating Spillable Disk based Table View"); return new FileSystemViewManager(conf, config, (basePath, viewConf) -> createSpillableMapBasedFileSystemView(conf, viewConf, basePath)); case MEMORY: - logger.info("Creating in-memory based Table View"); + LOG.info("Creating in-memory based Table View"); return new FileSystemViewManager(conf, config, (basePath, viewConfig) -> createInMemoryFileSystemView(conf, viewConfig, basePath)); case REMOTE_ONLY: - logger.info("Creating remote only table view"); + LOG.info("Creating remote only table view"); return new FileSystemViewManager(conf, config, (basePath, viewConfig) -> createRemoteFileSystemView(conf, viewConfig, new HoodieTableMetaClient(conf.newCopy(), basePath))); case REMOTE_FIRST: - logger.info("Creating remote first table view"); + LOG.info("Creating remote first table view"); return new FileSystemViewManager(conf, config, (basePath, viewConfig) -> { RemoteHoodieTableFileSystemView remoteFileSystemView = createRemoteFileSystemView(conf, viewConfig, new HoodieTableMetaClient(conf.newCopy(), basePath)); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java index 393ac77c4c07e..34dadb0fec3e8 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTableFileSystemView.java @@ -48,7 +48,7 @@ */ public class HoodieTableFileSystemView extends IncrementalTimelineSyncFileSystemView { - private static Logger log = LogManager.getLogger(HoodieTableFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(HoodieTableFileSystemView.class); // mapping from partition paths to file groups contained within them protected Map> partitionToFileGroupsMap; @@ -193,7 +193,7 @@ protected boolean isPartitionAvailableInStore(String partitionPath) { @Override protected void storePartitionView(String partitionPath, List fileGroups) { - log.info("Adding file-groups for partition :" + partitionPath + ", #FileGroups=" + fileGroups.size()); + LOG.info("Adding file-groups for partition :" + partitionPath + ", #FileGroups=" + fileGroups.size()); List newList = new ArrayList<>(fileGroups); partitionToFileGroupsMap.put(partitionPath, newList); } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java index 0e6b3053aa465..311f1697d9e3a 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java @@ -54,7 +54,7 @@ */ public abstract class IncrementalTimelineSyncFileSystemView extends AbstractTableFileSystemView { - private static Logger log = LogManager.getLogger(IncrementalTimelineSyncFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(IncrementalTimelineSyncFileSystemView.class); // Allows incremental Timeline syncing private final boolean incrementalTimelineSyncEnabled; @@ -78,19 +78,19 @@ protected void runSync(HoodieTimeline oldTimeline, HoodieTimeline newTimeline) { if (incrementalTimelineSyncEnabled) { TimelineDiffResult diffResult = TimelineDiffHelper.getNewInstantsForIncrementalSync(oldTimeline, newTimeline); if (diffResult.canSyncIncrementally()) { - log.info("Doing incremental sync"); + LOG.info("Doing incremental sync"); runIncrementalSync(newTimeline, diffResult); - log.info("Finished incremental sync"); + LOG.info("Finished incremental sync"); // Reset timeline to latest refreshTimeline(newTimeline); return; } } } catch (Exception ioe) { - log.error("Got exception trying to perform incremental sync. Reverting to complete sync", ioe); + LOG.error("Got exception trying to perform incremental sync. Reverting to complete sync", ioe); } - log.warn("Incremental Sync of timeline is turned off or deemed unsafe. Will revert to full syncing"); + LOG.warn("Incremental Sync of timeline is turned off or deemed unsafe. Will revert to full syncing"); super.runSync(oldTimeline, newTimeline); } @@ -102,7 +102,7 @@ protected void runSync(HoodieTimeline oldTimeline, HoodieTimeline newTimeline) { */ private void runIncrementalSync(HoodieTimeline timeline, TimelineDiffResult diffResult) { - log.info("Timeline Diff Result is :" + diffResult); + LOG.info("Timeline Diff Result is :" + diffResult); // First remove pending compaction instants which were completed diffResult.getFinishedCompactionInstants().stream().forEach(instant -> { @@ -143,7 +143,7 @@ private void runIncrementalSync(HoodieTimeline timeline, TimelineDiffResult diff * @param instant Compaction Instant to be removed */ private void removePendingCompactionInstant(HoodieTimeline timeline, HoodieInstant instant) throws IOException { - log.info("Removing completed compaction instant (" + instant + ")"); + LOG.info("Removing completed compaction instant (" + instant + ")"); HoodieCompactionPlan plan = CompactionUtils.getCompactionPlan(metaClient, instant.getTimestamp()); removePendingCompactionOperations(CompactionUtils.getPendingCompactionOperations(instant, plan) .map(instantPair -> Pair.of(instantPair.getValue().getKey(), @@ -157,7 +157,7 @@ private void removePendingCompactionInstant(HoodieTimeline timeline, HoodieInsta * @param instant Compaction Instant */ private void addPendingCompactionInstant(HoodieTimeline timeline, HoodieInstant instant) throws IOException { - log.info("Syncing pending compaction instant (" + instant + ")"); + LOG.info("Syncing pending compaction instant (" + instant + ")"); HoodieCompactionPlan compactionPlan = CompactionUtils.getCompactionPlan(metaClient, instant.getTimestamp()); List> pendingOps = CompactionUtils.getPendingCompactionOperations(instant, compactionPlan) @@ -188,13 +188,13 @@ private void 
addPendingCompactionInstant(HoodieTimeline timeline, HoodieInstant * @param instant Instant */ private void addCommitInstant(HoodieTimeline timeline, HoodieInstant instant) throws IOException { - log.info("Syncing committed instant (" + instant + ")"); + LOG.info("Syncing committed instant (" + instant + ")"); HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(timeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class); commitMetadata.getPartitionToWriteStats().entrySet().stream().forEach(entry -> { String partition = entry.getKey(); if (isPartitionAvailableInStore(partition)) { - log.info("Syncing partition (" + partition + ") of instant (" + instant + ")"); + LOG.info("Syncing partition (" + partition + ") of instant (" + instant + ")"); FileStatus[] statuses = entry.getValue().stream().map(p -> { FileStatus status = new FileStatus(p.getFileSizeInBytes(), false, 0, 0, 0, 0, null, null, null, new Path(String.format("%s/%s", metaClient.getBasePath(), p.getPath()))); @@ -204,10 +204,10 @@ private void addCommitInstant(HoodieTimeline timeline, HoodieInstant instant) th buildFileGroups(statuses, timeline.filterCompletedAndCompactionInstants(), false); applyDeltaFileSlicesToPartitionView(partition, fileGroups, DeltaApplyMode.ADD); } else { - log.warn("Skipping partition (" + partition + ") when syncing instant (" + instant + ") as it is not loaded"); + LOG.warn("Skipping partition (" + partition + ") when syncing instant (" + instant + ") as it is not loaded"); } }); - log.info("Done Syncing committed instant (" + instant + ")"); + LOG.info("Done Syncing committed instant (" + instant + ")"); } /** @@ -217,7 +217,7 @@ private void addCommitInstant(HoodieTimeline timeline, HoodieInstant instant) th * @param instant Restore Instant */ private void addRestoreInstant(HoodieTimeline timeline, HoodieInstant instant) throws IOException { - log.info("Syncing restore instant (" + instant + ")"); + LOG.info("Syncing restore instant (" + instant + ")"); HoodieRestoreMetadata metadata = AvroUtils.deserializeAvroMetadata(timeline.getInstantDetails(instant).get(), HoodieRestoreMetadata.class); @@ -231,7 +231,7 @@ private void addRestoreInstant(HoodieTimeline timeline, HoodieInstant instant) t removeFileSlicesForPartition(timeline, instant, e.getKey(), e.getValue().stream().map(x -> x.getValue()).collect(Collectors.toList())); }); - log.info("Done Syncing restore instant (" + instant + ")"); + LOG.info("Done Syncing restore instant (" + instant + ")"); } /** @@ -241,14 +241,14 @@ private void addRestoreInstant(HoodieTimeline timeline, HoodieInstant instant) t * @param instant Rollback Instant */ private void addRollbackInstant(HoodieTimeline timeline, HoodieInstant instant) throws IOException { - log.info("Syncing rollback instant (" + instant + ")"); + LOG.info("Syncing rollback instant (" + instant + ")"); HoodieRollbackMetadata metadata = AvroUtils.deserializeAvroMetadata(timeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class); metadata.getPartitionMetadata().entrySet().stream().forEach(e -> { removeFileSlicesForPartition(timeline, instant, e.getKey(), e.getValue().getSuccessDeleteFiles()); }); - log.info("Done Syncing rollback instant (" + instant + ")"); + LOG.info("Done Syncing rollback instant (" + instant + ")"); } /** @@ -258,7 +258,7 @@ private void addRollbackInstant(HoodieTimeline timeline, HoodieInstant instant) * @param instant Clean instant */ private void addCleanInstant(HoodieTimeline timeline, HoodieInstant instant) throws IOException { - 
log.info("Syncing cleaner instant (" + instant + ")"); + LOG.info("Syncing cleaner instant (" + instant + ")"); HoodieCleanMetadata cleanMetadata = AvroUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(instant).get()); cleanMetadata.getPartitionMetadata().entrySet().stream().forEach(entry -> { @@ -270,13 +270,13 @@ private void addCleanInstant(HoodieTimeline timeline, HoodieInstant instant) thr .collect(Collectors.toList()); removeFileSlicesForPartition(timeline, instant, entry.getKey(), fullPathList); }); - log.info("Done Syncing cleaner instant (" + instant + ")"); + LOG.info("Done Syncing cleaner instant (" + instant + ")"); } private void removeFileSlicesForPartition(HoodieTimeline timeline, HoodieInstant instant, String partition, List paths) { if (isPartitionAvailableInStore(partition)) { - log.info("Removing file slices for partition (" + partition + ") for instant (" + instant + ")"); + LOG.info("Removing file slices for partition (" + partition + ") for instant (" + instant + ")"); FileStatus[] statuses = paths.stream().map(p -> { FileStatus status = new FileStatus(); status.setPath(new Path(p)); @@ -286,7 +286,7 @@ private void removeFileSlicesForPartition(HoodieTimeline timeline, HoodieInstant buildFileGroups(statuses, timeline.filterCompletedAndCompactionInstants(), false); applyDeltaFileSlicesToPartitionView(partition, fileGroups, DeltaApplyMode.REMOVE); } else { - log.warn("Skipping partition (" + partition + ") when syncing instant (" + instant + ") as it is not loaded"); + LOG.warn("Skipping partition (" + partition + ") when syncing instant (" + instant + ") as it is not loaded"); } } @@ -309,7 +309,7 @@ enum DeltaApplyMode { protected void applyDeltaFileSlicesToPartitionView(String partition, List deltaFileGroups, DeltaApplyMode mode) { if (deltaFileGroups.isEmpty()) { - log.info("No delta file groups for partition :" + partition); + LOG.info("No delta file groups for partition :" + partition); return; } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java index e622cb167a382..e9087db952eb2 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/PriorityBasedFileSystemView.java @@ -45,7 +45,7 @@ */ public class PriorityBasedFileSystemView implements SyncableFileSystemView, Serializable { - private static Logger log = LogManager.getLogger(PriorityBasedFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(PriorityBasedFileSystemView.class); private final SyncableFileSystemView preferredView; private final SyncableFileSystemView secondaryView; @@ -59,13 +59,13 @@ public PriorityBasedFileSystemView(SyncableFileSystemView preferredView, Syncabl private R execute(Function0 preferredFunction, Function0 secondaryFunction) { if (errorOnPreferredView) { - log.warn("Routing request to secondary file-system view"); + LOG.warn("Routing request to secondary file-system view"); return secondaryFunction.apply(); } else { try { return preferredFunction.apply(); } catch (RuntimeException re) { - log.error("Got error running preferred function. Trying secondary", re); + LOG.error("Got error running preferred function. 
Trying secondary", re); errorOnPreferredView = true; return secondaryFunction.apply(); } @@ -74,13 +74,13 @@ private R execute(Function0 preferredFunction, Function0 secondaryFunc private R execute(T1 val, Function1 preferredFunction, Function1 secondaryFunction) { if (errorOnPreferredView) { - log.warn("Routing request to secondary file-system view"); + LOG.warn("Routing request to secondary file-system view"); return secondaryFunction.apply(val); } else { try { return preferredFunction.apply(val); } catch (RuntimeException re) { - log.error("Got error running preferred function. Trying secondary", re); + LOG.error("Got error running preferred function. Trying secondary", re); errorOnPreferredView = true; return secondaryFunction.apply(val); } @@ -90,13 +90,13 @@ private R execute(T1 val, Function1 preferredFunction, Function1< private R execute(T1 val, T2 val2, Function2 preferredFunction, Function2 secondaryFunction) { if (errorOnPreferredView) { - log.warn("Routing request to secondary file-system view"); + LOG.warn("Routing request to secondary file-system view"); return secondaryFunction.apply(val, val2); } else { try { return preferredFunction.apply(val, val2); } catch (RuntimeException re) { - log.error("Got error running preferred function. Trying secondary", re); + LOG.error("Got error running preferred function. Trying secondary", re); errorOnPreferredView = true; return secondaryFunction.apply(val, val2); } @@ -106,13 +106,13 @@ private R execute(T1 val, T2 val2, Function2 preferredFun private R execute(T1 val, T2 val2, T3 val3, Function3 preferredFunction, Function3 secondaryFunction) { if (errorOnPreferredView) { - log.warn("Routing request to secondary file-system view"); + LOG.warn("Routing request to secondary file-system view"); return secondaryFunction.apply(val, val2, val3); } else { try { return preferredFunction.apply(val, val2, val3); } catch (RuntimeException re) { - log.error("Got error running preferred function. Trying secondary", re); + LOG.error("Got error running preferred function. 
Trying secondary", re); errorOnPreferredView = true; return secondaryFunction.apply(val, val2, val3); } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java index 8388d6713a406..c8e625bda1feb 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java @@ -108,7 +108,7 @@ public class RemoteHoodieTableFileSystemView implements SyncableFileSystemView, public static final String INCLUDE_FILES_IN_PENDING_COMPACTION_PARAM = "includependingcompaction"; - private static Logger log = LogManager.getLogger(RemoteHoodieTableFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(RemoteHoodieTableFileSystemView.class); private final String serverHost; private final int serverPort; @@ -148,7 +148,7 @@ private T executeRequest(String requestPath, Map queryParame builder.addParameter(TIMELINE_HASH, timeline.getTimelineHash()); String url = builder.toString(); - log.info("Sending request : (" + url + ")"); + LOG.info("Sending request : (" + url + ")"); Response response = null; int timeout = 1000 * 300; // 5 min timeout switch (method) { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java index 1dafb4a37b6ac..2502cc154778f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RocksDbBasedFileSystemView.java @@ -58,7 +58,7 @@ */ public class RocksDbBasedFileSystemView extends IncrementalTimelineSyncFileSystemView { - private static Logger log = LogManager.getLogger(RocksDbBasedFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(RocksDbBasedFileSystemView.class); private final FileSystemViewStorageConfig config; @@ -87,7 +87,7 @@ public RocksDbBasedFileSystemView(HoodieTableMetaClient metaClient, HoodieTimeli protected void init(HoodieTableMetaClient metaClient, HoodieTimeline visibleActiveTimeline) { schemaHelper.getAllColumnFamilies().stream().forEach(rocksDB::addColumnFamily); super.init(metaClient, visibleActiveTimeline); - log.info("Created ROCKSDB based file-system view at " + config.getRocksdbBasePath()); + LOG.info("Created ROCKSDB based file-system view at " + config.getRocksdbBasePath()); } @Override @@ -102,7 +102,7 @@ protected void resetPendingCompactionOperations(Stream fileGroups) { - log.info("Resetting and adding new partition (" + partitionPath + ") to ROCKSDB based file-system view at " + LOG.info("Resetting and adding new partition (" + partitionPath + ") to ROCKSDB based file-system view at " + config.getRocksdbBasePath() + ", Total file-groups=" + fileGroups.size()); String lookupKey = schemaHelper.getKeyForPartitionLookup(partitionPath); @@ -184,7 +184,7 @@ protected void storePartitionView(String partitionPath, List fi // record that partition is loaded. 
rocksDB.put(schemaHelper.getColFamilyForStoredPartitions(), lookupKey, Boolean.TRUE); - log.info("Finished adding new partition (" + partitionPath + ") to ROCKSDB based file-system view at " + LOG.info("Finished adding new partition (" + partitionPath + ") to ROCKSDB based file-system view at " + config.getRocksdbBasePath() + ", Total file-groups=" + fileGroups.size()); } @@ -202,7 +202,7 @@ protected void applyDeltaFileSlicesToPartitionView(String partition, List> createPartitionToFileGroups() { try { - log.info("Creating Partition To File groups map using external spillable Map. Max Mem=" + maxMemoryForFileGroupMap + LOG.info("Creating Partition To File groups map using external spillable Map. Max Mem=" + maxMemoryForFileGroupMap + ", BaseDir=" + baseStoreDir); new File(baseStoreDir).mkdirs(); return (Map>) (new ExternalSpillableMap<>(maxMemoryForFileGroupMap, baseStoreDir, @@ -79,7 +79,7 @@ protected Map> createPartitionToFileGroups() { protected Map> createFileIdToPendingCompactionMap( Map> fgIdToPendingCompaction) { try { - log.info("Creating Pending Compaction map using external spillable Map. Max Mem=" + maxMemoryForPendingCompaction + LOG.info("Creating Pending Compaction map using external spillable Map. Max Mem=" + maxMemoryForPendingCompaction + ", BaseDir=" + baseStoreDir); new File(baseStoreDir).mkdirs(); Map> pendingMap = new ExternalSpillableMap<>( diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/DFSPropertiesConfiguration.java b/hudi-common/src/main/java/org/apache/hudi/common/util/DFSPropertiesConfiguration.java index acd31a7b341d4..838d4b89d5a45 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/DFSPropertiesConfiguration.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/DFSPropertiesConfiguration.java @@ -41,7 +41,7 @@ */ public class DFSPropertiesConfiguration { - private static volatile Logger log = LogManager.getLogger(DFSPropertiesConfiguration.class); + private static final Logger LOG = LogManager.getLogger(DFSPropertiesConfiguration.class); private final FileSystem fs; @@ -76,7 +76,7 @@ private void visitFile(Path file) { BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(file))); addProperties(reader); } catch (IOException ioe) { - log.error("Error reading in properies from dfs", ioe); + LOG.error("Error reading in properies from dfs", ioe); throw new IllegalArgumentException("Cannot read properties from dfs", ioe); } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java b/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java index 7a54b893f8499..b4a099179ec40 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/FailSafeConsistencyGuard.java @@ -39,7 +39,7 @@ */ public class FailSafeConsistencyGuard implements ConsistencyGuard { - private static final transient Logger LOG = LogManager.getLogger(FailSafeConsistencyGuard.class); + private static final Logger LOG = LogManager.getLogger(FailSafeConsistencyGuard.class); private final FileSystem fs; private final ConsistencyGuardConfig consistencyGuardConfig; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/HoodieRecordSizeEstimator.java b/hudi-common/src/main/java/org/apache/hudi/common/util/HoodieRecordSizeEstimator.java index 731378cea013f..387b8d0c38b9f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/HoodieRecordSizeEstimator.java +++ 
b/hudi-common/src/main/java/org/apache/hudi/common/util/HoodieRecordSizeEstimator.java @@ -33,7 +33,7 @@ */ public class HoodieRecordSizeEstimator implements SizeEstimator> { - private static Logger log = LogManager.getLogger(HoodieRecordSizeEstimator.class); + private static final Logger LOG = LogManager.getLogger(HoodieRecordSizeEstimator.class); // Schema used to get GenericRecord from HoodieRecordPayload then convert to bytes and vice-versa private final Schema schema; @@ -50,7 +50,7 @@ public long sizeEstimate(HoodieRecord hoodieRecord) { /** {@link ExternalSpillableMap} **/ long sizeOfRecord = ObjectSizeCalculator.getObjectSize(hoodieRecord); long sizeOfSchema = ObjectSizeCalculator.getObjectSize(schema); - log.info("SizeOfRecord => " + sizeOfRecord + " SizeOfSchema => " + sizeOfSchema); + LOG.info("SizeOfRecord => " + sizeOfRecord + " SizeOfSchema => " + sizeOfSchema); return sizeOfRecord; } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java index 3241dea6675e1..822f5eb7cff1e 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java @@ -55,7 +55,7 @@ */ public class RocksDBDAO { - protected static final transient Logger LOG = LogManager.getLogger(RocksDBDAO.class); + private static final Logger LOG = LogManager.getLogger(RocksDBDAO.class); private transient ConcurrentHashMap managedHandlesMap; private transient ConcurrentHashMap managedDescriptorMap; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/TimelineDiffHelper.java b/hudi-common/src/main/java/org/apache/hudi/common/util/TimelineDiffHelper.java index b82d6716e29ad..df88b0e92b8b2 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/TimelineDiffHelper.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/TimelineDiffHelper.java @@ -36,7 +36,7 @@ */ public class TimelineDiffHelper { - protected static Logger log = LogManager.getLogger(TimelineDiffHelper.class); + private static final Logger LOG = LogManager.getLogger(TimelineDiffHelper.class); public static TimelineDiffResult getNewInstantsForIncrementalSync(HoodieTimeline oldTimeline, HoodieTimeline newTimeline) { @@ -64,7 +64,7 @@ public static TimelineDiffResult getNewInstantsForIncrementalSync(HoodieTimeline if (!lostPendingCompactions.isEmpty()) { // If a compaction is unscheduled, fall back to complete refresh of fs view since some log files could have been // moved. Its unsafe to incrementally sync in that case. - log.warn("Some pending compactions are no longer in new timeline (unscheduled ?)." + "They are :" + LOG.warn("Some pending compactions are no longer in new timeline (unscheduled ?)." 
+ "They are :" + lostPendingCompactions); return TimelineDiffResult.UNSAFE_SYNC_RESULT; } @@ -77,7 +77,7 @@ public static TimelineDiffResult getNewInstantsForIncrementalSync(HoodieTimeline return new TimelineDiffResult(newInstants, finishedCompactionInstants, true); } else { // One or more timelines is empty - log.warn("One or more timelines is empty"); + LOG.warn("One or more timelines is empty"); return TimelineDiffResult.UNSAFE_SYNC_RESULT; } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryExecutor.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryExecutor.java index dc53f839d6dc3..cec9ab61a9c1b 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryExecutor.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryExecutor.java @@ -44,7 +44,7 @@ */ public class BoundedInMemoryExecutor { - private static Logger logger = LogManager.getLogger(BoundedInMemoryExecutor.class); + private static final Logger LOG = LogManager.getLogger(BoundedInMemoryExecutor.class); // Executor service used for launching writer thread. private final ExecutorService executorService; @@ -91,7 +91,7 @@ public ExecutorCompletionService startProducers() { preExecute(); producer.produce(queue); } catch (Exception e) { - logger.error("error producing records", e); + LOG.error("error producing records", e); queue.markAsFailed(e); throw e; } finally { @@ -115,14 +115,14 @@ public ExecutorCompletionService startProducers() { private Future startConsumer() { return consumer.map(consumer -> { return executorService.submit(() -> { - logger.info("starting consumer thread"); + LOG.info("starting consumer thread"); preExecute(); try { E result = consumer.consume(queue); - logger.info("Queue Consumption is done; notifying producer threads"); + LOG.info("Queue Consumption is done; notifying producer threads"); return result; } catch (Exception e) { - logger.error("error consuming records", e); + LOG.error("error consuming records", e); queue.markAsFailed(e); throw e; } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java index 9ca9f81146a96..2c5ce5d9782a1 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java @@ -58,7 +58,7 @@ public class BoundedInMemoryQueue implements Iterable { public static final int RECORD_SAMPLING_RATE = 64; // maximum records that will be cached private static final int RECORD_CACHING_LIMIT = 128 * 1024; - private static Logger logger = LogManager.getLogger(BoundedInMemoryQueue.class); + private static final Logger LOG = LogManager.getLogger(BoundedInMemoryQueue.class); // It indicates number of records to cache. We will be using sampled record's average size to // determine how many // records we should cache and will change (increase/decrease) permits accordingly. 
@@ -203,7 +203,7 @@ private Option readNextRecord() { break; } } catch (InterruptedException e) { - logger.error("error reading records from queue", e); + LOG.error("error reading records from queue", e); throw new HoodieException(e); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java index 897f3cd3d33b3..71cc5a63fb6ae 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java @@ -71,7 +71,7 @@ @SuppressWarnings("ResultOfMethodCallIgnored") public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness { - private static final transient Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class); private static String TEST_WRITE_TOKEN = "1-0-1"; diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java index 89655b50b44d8..4c8f1a4b78a4f 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java @@ -77,7 +77,7 @@ */ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { - private static final transient Logger LOG = LogManager.getLogger(TestIncrementalFSViewSync.class); + private static final Logger LOG = LogManager.getLogger(TestIncrementalFSViewSync.class); private static String TEST_WRITE_TOKEN = "1-0-1"; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieParquetInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieParquetInputFormat.java index bd9672c9c004b..5d6e52c41abee 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieParquetInputFormat.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieParquetInputFormat.java @@ -60,7 +60,7 @@ @UseFileSplitsFromInputFormat public class HoodieParquetInputFormat extends MapredParquetInputFormat implements Configurable { - private static final transient Logger LOG = LogManager.getLogger(HoodieParquetInputFormat.class); + private static final Logger LOG = LogManager.getLogger(HoodieParquetInputFormat.class); protected Configuration conf; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java index 14ca2f4ed4d7d..fae8111722eb5 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java @@ -50,7 +50,7 @@ */ public class HoodieROTablePathFilter implements PathFilter, Serializable { - private static final transient Logger LOG = LogManager.getLogger(HoodieROTablePathFilter.class); + private static final Logger LOG = LogManager.getLogger(HoodieROTablePathFilter.class); /** * Its quite common, to have all files from a given partition path be passed into accept(), cache the check for hoodie diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java 
b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java index 21427cc1c2956..0386186e70f9a 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/RecordReaderValueIterator.java @@ -36,7 +36,7 @@ */ public class RecordReaderValueIterator implements Iterator { - private static final transient Logger LOG = LogManager.getLogger(RecordReaderValueIterator.class); + private static final Logger LOG = LogManager.getLogger(RecordReaderValueIterator.class); private final RecordReader reader; private V nextVal = null; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java index ef03c0d5988a7..f62f288eb79cc 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieParquetRealtimeInputFormat.java @@ -65,7 +65,7 @@ @UseFileSplitsFromInputFormat public class HoodieParquetRealtimeInputFormat extends HoodieParquetInputFormat implements Configurable { - private static final transient Logger LOG = LogManager.getLogger(HoodieParquetRealtimeInputFormat.class); + private static final Logger LOG = LogManager.getLogger(HoodieParquetRealtimeInputFormat.class); // These positions have to be deterministic across all tables public static final int HOODIE_COMMIT_TIME_COL_POS = 0; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java index 8af2f0851e2fb..cb8606e187221 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieRealtimeRecordReader.java @@ -39,7 +39,7 @@ public class HoodieRealtimeRecordReader implements RecordReader reader; public HoodieRealtimeRecordReader(HoodieRealtimeFileSplit split, JobConf job, diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java b/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java index 5acb3d6548677..6bcb697d848df 100644 --- a/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java +++ b/hudi-hive/src/main/java/org/apache/hudi/hive/HiveSyncTool.java @@ -52,7 +52,7 @@ @SuppressWarnings("WeakerAccess") public class HiveSyncTool { - private static Logger LOG = LogManager.getLogger(HiveSyncTool.class); + private static final Logger LOG = LogManager.getLogger(HiveSyncTool.class); private final HoodieHiveClient hoodieHiveClient; public static final String SUFFIX_REALTIME_TABLE = "_rt"; private final HiveSyncConfig cfg; diff --git a/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java b/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java index 1dffee5fbdc1b..1f76f2a6f72b2 100644 --- a/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java +++ b/hudi-hive/src/main/java/org/apache/hudi/hive/HoodieHiveClient.java @@ -86,7 +86,7 @@ public class HoodieHiveClient { } } - private static Logger LOG = LogManager.getLogger(HoodieHiveClient.class); + private static final Logger LOG = LogManager.getLogger(HoodieHiveClient.class); private final HoodieTableMetaClient metaClient; private final HoodieTableType tableType; private final 
PartitionValueExtractor partitionValueExtractor; diff --git a/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java b/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java index c9a711ca5ed69..160e50bfe5986 100644 --- a/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java +++ b/hudi-hive/src/test/java/org/apache/hudi/hive/util/HiveTestService.java @@ -59,7 +59,7 @@ public class HiveTestService { - private static Logger LOG = LogManager.getLogger(HiveTestService.class); + private static final Logger LOG = LogManager.getLogger(HiveTestService.class); private static final int CONNECTION_TIMEOUT = 30000; diff --git a/hudi-spark/src/test/java/HoodieJavaApp.java b/hudi-spark/src/test/java/HoodieJavaApp.java index 5370e75de480d..a669f6cc8733a 100644 --- a/hudi-spark/src/test/java/HoodieJavaApp.java +++ b/hudi-spark/src/test/java/HoodieJavaApp.java @@ -85,7 +85,7 @@ public class HoodieJavaApp { @Parameter(names = {"--help", "-h"}, help = true) public Boolean help = false; - private static Logger logger = LogManager.getLogger(HoodieJavaApp.class); + private static final Logger LOG = LogManager.getLogger(HoodieJavaApp.class); public static void main(String[] args) throws Exception { HoodieJavaApp cli = new HoodieJavaApp(); @@ -154,7 +154,7 @@ public void run() throws Exception { // new dataset if needed writer.save(tablePath); // ultimately where the dataset will be placed String commitInstantTime1 = HoodieDataSourceHelpers.latestCommit(fs, tablePath); - logger.info("First commit at instant time :" + commitInstantTime1); + LOG.info("First commit at instant time :" + commitInstantTime1); /** * Commit that updates records @@ -177,7 +177,7 @@ public void run() throws Exception { updateHiveSyncConfig(writer); writer.save(tablePath); String commitInstantTime2 = HoodieDataSourceHelpers.latestCommit(fs, tablePath); - logger.info("Second commit at instant time :" + commitInstantTime2); + LOG.info("Second commit at instant time :" + commitInstantTime2); /** * Commit that Deletes some records @@ -201,7 +201,7 @@ public void run() throws Exception { updateHiveSyncConfig(writer); writer.save(tablePath); String commitInstantTime3 = HoodieDataSourceHelpers.latestCommit(fs, tablePath); - logger.info("Third commit at instant time :" + commitInstantTime3); + LOG.info("Third commit at instant time :" + commitInstantTime3); /** * Read & do some queries @@ -226,7 +226,7 @@ public void run() throws Exception { // For incremental view, pass in the root/base path of dataset .load(tablePath); - logger.info("You will only see records from : " + commitInstantTime2); + LOG.info("You will only see records from : " + commitInstantTime2); hoodieIncViewDF.groupBy(hoodieIncViewDF.col("_hoodie_commit_time")).count().show(); } } @@ -236,7 +236,7 @@ public void run() throws Exception { */ private DataFrameWriter updateHiveSyncConfig(DataFrameWriter writer) { if (enableHiveSync) { - logger.info("Enabling Hive sync to " + hiveJdbcUrl); + LOG.info("Enabling Hive sync to " + hiveJdbcUrl); writer = writer.option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY(), hiveTable) .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY(), hiveDB) .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY(), hiveJdbcUrl) diff --git a/hudi-spark/src/test/java/HoodieJavaStreamingApp.java b/hudi-spark/src/test/java/HoodieJavaStreamingApp.java index 694ae88bb73ff..1529c1121d4de 100644 --- a/hudi-spark/src/test/java/HoodieJavaStreamingApp.java +++ b/hudi-spark/src/test/java/HoodieJavaStreamingApp.java @@ -94,7 +94,7 @@ 
public class HoodieJavaStreamingApp { public Boolean help = false; - private static Logger logger = LogManager.getLogger(HoodieJavaStreamingApp.class); + private static final Logger LOG = LogManager.getLogger(HoodieJavaStreamingApp.class); public static void main(String[] args) throws Exception { HoodieJavaStreamingApp cli = new HoodieJavaStreamingApp(); @@ -143,17 +143,17 @@ public void run() throws Exception { // thread for spark strucutured streaming Future streamFuture = executor.submit(() -> { - logger.info("===== Streaming Starting ====="); + LOG.info("===== Streaming Starting ====="); stream(streamingInput); - logger.info("===== Streaming Ends ====="); + LOG.info("===== Streaming Ends ====="); return null; }); // thread for adding data to the streaming source and showing results over time Future showFuture = executor.submit(() -> { - logger.info("===== Showing Starting ====="); + LOG.info("===== Showing Starting ====="); show(spark, fs, inputDF1, inputDF2); - logger.info("===== Showing Ends ====="); + LOG.info("===== Showing Ends ====="); return null; }); @@ -178,13 +178,13 @@ public void show(SparkSession spark, FileSystem fs, Dataset inputDF1, Datas // wait for spark streaming to process one microbatch Thread.sleep(3000); String commitInstantTime1 = HoodieDataSourceHelpers.latestCommit(fs, tablePath); - logger.info("First commit at instant time :" + commitInstantTime1); + LOG.info("First commit at instant time :" + commitInstantTime1); inputDF2.write().mode(SaveMode.Append).json(streamingSourcePath); // wait for spark streaming to process one microbatch Thread.sleep(3000); String commitInstantTime2 = HoodieDataSourceHelpers.latestCommit(fs, tablePath); - logger.info("Second commit at instant time :" + commitInstantTime2); + LOG.info("Second commit at instant time :" + commitInstantTime2); /** * Read & do some queries @@ -209,7 +209,7 @@ public void show(SparkSession spark, FileSystem fs, Dataset inputDF1, Datas // For incremental view, pass in the root/base path of dataset .load(tablePath); - logger.info("You will only see records from : " + commitInstantTime2); + LOG.info("You will only see records from : " + commitInstantTime2); hoodieIncViewDF.groupBy(hoodieIncViewDF.col("_hoodie_commit_time")).count().show(); } } @@ -243,7 +243,7 @@ public void stream(Dataset streamingInput) throws Exception { */ private DataStreamWriter updateHiveSyncConfig(DataStreamWriter writer) { if (enableHiveSync) { - logger.info("Enabling Hive sync to " + hiveJdbcUrl); + LOG.info("Enabling Hive sync to " + hiveJdbcUrl); writer = writer.option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY(), hiveTable) .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY(), hiveDB) .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY(), hiveJdbcUrl) diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java index e8ac49d3f5ab6..b62591ac562e2 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java @@ -40,7 +40,7 @@ */ public class TimelineService { - private static volatile Logger log = LogManager.getLogger(TimelineService.class); + private static final Logger LOG = LogManager.getLogger(TimelineService.class); private int serverPort; private Configuration conf; @@ -106,7 +106,7 @@ public int startService() throws IOException { app.start(serverPort); // If 
port = 0, a dynamic port is assigned. Store it. serverPort = app.port(); - log.info("Starting Timeline server on port :" + serverPort); + LOG.info("Starting Timeline server on port :" + serverPort); return serverPort; } @@ -140,11 +140,11 @@ public static FileSystemViewManager buildFileSystemViewManager(Config config, Se } public void close() { - log.info("Closing Timeline Service"); + LOG.info("Closing Timeline Service"); this.app.stop(); this.app = null; this.fsViewsManager.close(); - log.info("Closed Timeline Service"); + LOG.info("Closed Timeline Service"); } public Configuration getConf() { diff --git a/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java b/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java index 5f7267634b59c..e01988ae40e92 100644 --- a/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java +++ b/hudi-timeline-service/src/test/java/org/apache/hudi/timeline/table/view/TestRemoteHoodieTableFileSystemView.java @@ -36,7 +36,7 @@ */ public class TestRemoteHoodieTableFileSystemView extends TestHoodieTableFileSystemView { - private static Logger log = LogManager.getLogger(TestRemoteHoodieTableFileSystemView.class); + private static final Logger LOG = LogManager.getLogger(TestRemoteHoodieTableFileSystemView.class); private TimelineService server; private RemoteHoodieTableFileSystemView view; @@ -51,7 +51,7 @@ protected SyncableFileSystemView getFileSystemView(HoodieTimeline timeline) { } catch (Exception ex) { throw new RuntimeException(ex); } - log.info("Connecting to Timeline Server :" + server.getServerPort()); + LOG.info("Connecting to Timeline Server :" + server.getServerPort()); view = new RemoteHoodieTableFileSystemView("localhost", server.getServerPort(), metaClient); return view; } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java index 069c8bbca43dd..4aa72d0ab9ae6 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java @@ -66,7 +66,7 @@ */ public class HDFSParquetImporter implements Serializable { - private static volatile Logger log = LogManager.getLogger(HDFSParquetImporter.class); + private static final Logger LOG = LogManager.getLogger(HDFSParquetImporter.class); private static final DateTimeFormatter PARTITION_FORMATTER = DateTimeFormatter.ofPattern("yyyy/MM/dd") .withZone(ZoneId.systemDefault()); @@ -103,7 +103,7 @@ public int dataImport(JavaSparkContext jsc, int retry) { this.fs = FSUtils.getFs(cfg.targetPath, jsc.hadoopConfiguration()); this.props = cfg.propsFilePath == null ? UtilHelpers.buildProperties(cfg.configs) : UtilHelpers.readConfig(fs, new Path(cfg.propsFilePath), cfg.configs).getConfig(); - log.info("Starting data import with configs : " + props.toString()); + LOG.info("Starting data import with configs : " + props.toString()); int ret = -1; try { // Verify that targetPath is not present. 
@@ -114,7 +114,7 @@ public int dataImport(JavaSparkContext jsc, int retry) { ret = dataImport(jsc); } while (ret != 0 && retry-- > 0); } catch (Throwable t) { - log.error(t); + LOG.error(t); } return ret; } @@ -145,7 +145,7 @@ protected int dataImport(JavaSparkContext jsc) throws IOException { JavaRDD writeResponse = load(client, instantTime, hoodieRecords); return UtilHelpers.handleErrors(jsc, instantTime, writeResponse); } catch (Throwable t) { - log.error("Error occurred.", t); + LOG.error("Error occurred.", t); } return -1; } @@ -175,13 +175,13 @@ protected JavaRDD> buildHoodieRecordsForImport throw new HoodieIOException("row field is missing. :" + cfg.rowKey); } String partitionPath = partitionField.toString(); - log.debug("Row Key : " + rowField + ", Partition Path is (" + partitionPath + ")"); + LOG.debug("Row Key : " + rowField + ", Partition Path is (" + partitionPath + ")"); if (partitionField instanceof Number) { try { long ts = (long) (Double.parseDouble(partitionField.toString()) * 1000L); partitionPath = PARTITION_FORMATTER.format(Instant.ofEpochMilli(ts)); } catch (NumberFormatException nfe) { - log.warn("Unable to parse date from partition field. Assuming partition as (" + partitionField + ")"); + LOG.warn("Unable to parse date from partition field. Assuming partition as (" + partitionField + ")"); } } return new HoodieRecord<>(new HoodieKey(rowField.toString(), partitionPath), diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HiveIncrementalPuller.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HiveIncrementalPuller.java index 62ae769affc17..cdd9e044e59f7 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HiveIncrementalPuller.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HiveIncrementalPuller.java @@ -61,7 +61,7 @@ */ public class HiveIncrementalPuller { - private static Logger log = LogManager.getLogger(HiveIncrementalPuller.class); + private static final Logger LOG = LogManager.getLogger(HiveIncrementalPuller.class); private static String driverName = "org.apache.hive.jdbc.HiveDriver"; public static class Config implements Serializable { @@ -129,14 +129,14 @@ public void saveDelta() throws IOException { try { if (config.fromCommitTime == null) { config.fromCommitTime = inferCommitTime(fs); - log.info("FromCommitTime inferred as " + config.fromCommitTime); + LOG.info("FromCommitTime inferred as " + config.fromCommitTime); } - log.info("FromCommitTime - " + config.fromCommitTime); + LOG.info("FromCommitTime - " + config.fromCommitTime); String sourceTableLocation = getTableLocation(config.sourceDb, config.sourceTable); String lastCommitTime = getLastCommitTimePulled(fs, sourceTableLocation); if (lastCommitTime == null) { - log.info("Nothing to pull. However we will continue to create a empty table"); + LOG.info("Nothing to pull. 
However we will continue to create a empty table"); lastCommitTime = config.fromCommitTime; } @@ -155,9 +155,9 @@ public void saveDelta() throws IOException { initHiveBeelineProperties(stmt); executeIncrementalSQL(tempDbTable, tempDbTablePath, stmt); - log.info("Finished HoodieReader execution"); + LOG.info("Finished HoodieReader execution"); } catch (SQLException e) { - log.error("Exception when executing SQL", e); + LOG.error("Exception when executing SQL", e); throw new IOException("Could not scan " + config.sourceTable + " incrementally", e); } finally { try { @@ -165,7 +165,7 @@ public void saveDelta() throws IOException { stmt.close(); } } catch (SQLException e) { - log.error("Could not close the resultset opened ", e); + LOG.error("Could not close the resultset opened ", e); } } } @@ -180,13 +180,13 @@ private void executeIncrementalSQL(String tempDbTable, String tempDbTablePath, S incrementalPullSQLtemplate.add("storedAsClause", storedAsClause); String incrementalSQL = new Scanner(new File(config.incrementalSQLFile)).useDelimiter("\\Z").next(); if (!incrementalSQL.contains(config.sourceDb + "." + config.sourceTable)) { - log.info("Incremental SQL does not have " + config.sourceDb + "." + config.sourceTable + LOG.info("Incremental SQL does not have " + config.sourceDb + "." + config.sourceTable + ", which means its pulling from a different table. Fencing this from " + "happening."); throw new HoodieIncrementalPullSQLException( "Incremental SQL does not have " + config.sourceDb + "." + config.sourceTable); } if (!incrementalSQL.contains("`_hoodie_commit_time` > '%targetBasePath'")) { - log.info("Incremental SQL : " + incrementalSQL + LOG.info("Incremental SQL : " + incrementalSQL + " does not contain `_hoodie_commit_time` > '%targetBasePath'. Please add " + "this clause for incremental to work properly."); throw new HoodieIncrementalPullSQLException( @@ -205,7 +205,7 @@ private String getStoredAsClause() { } private void initHiveBeelineProperties(Statement stmt) throws SQLException { - log.info("Setting up Hive JDBC Session with properties"); + LOG.info("Setting up Hive JDBC Session with properties"); // set the queue executeStatement("set mapred.job.queue.name=" + config.yarnQueueName, stmt); // Set the inputformat to HoodieCombineHiveInputFormat @@ -224,17 +224,17 @@ private void initHiveBeelineProperties(Statement stmt) throws SQLException { } private boolean deleteHDFSPath(FileSystem fs, String path) throws IOException { - log.info("Deleting path " + path); + LOG.info("Deleting path " + path); return fs.delete(new Path(path), true); } private void executeStatement(String sql, Statement stmt) throws SQLException { - log.info("Executing: " + sql); + LOG.info("Executing: " + sql); stmt.execute(sql); } private String inferCommitTime(FileSystem fs) throws SQLException, IOException { - log.info("FromCommitTime not specified. Trying to infer it from Hoodie dataset " + config.targetDb + "." + LOG.info("FromCommitTime not specified. Trying to infer it from Hoodie dataset " + config.targetDb + "." + config.targetTable); String targetDataLocation = getTableLocation(config.targetDb, config.targetTable); return scanForCommitTime(fs, targetDataLocation); @@ -249,7 +249,7 @@ private String getTableLocation(String db, String table) throws SQLException { resultSet = stmt.executeQuery("describe formatted `" + db + "." + table + "`"); while (resultSet.next()) { if (resultSet.getString(1).trim().equals("Location:")) { - log.info("Inferred table location for " + db + "." 
+ table + " as " + resultSet.getString(2)); + LOG.info("Inferred table location for " + db + "." + table + " as " + resultSet.getString(2)); return resultSet.getString(2); } } @@ -264,7 +264,7 @@ private String getTableLocation(String db, String table) throws SQLException { resultSet.close(); } } catch (SQLException e) { - log.error("Could not close the resultset opened ", e); + LOG.error("Could not close the resultset opened ", e); } } return null; @@ -290,7 +290,7 @@ private String scanForCommitTime(FileSystem fs, String targetDataPath) throws IO private boolean ensureTempPathExists(FileSystem fs, String lastCommitTime) throws IOException { Path targetBaseDirPath = new Path(config.hoodieTmpDir, config.targetTable + "__" + config.sourceTable); if (!fs.exists(targetBaseDirPath)) { - log.info("Creating " + targetBaseDirPath + " with permission drwxrwxrwx"); + LOG.info("Creating " + targetBaseDirPath + " with permission drwxrwxrwx"); boolean result = FileSystem.mkdirs(fs, targetBaseDirPath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); if (!result) { @@ -305,7 +305,7 @@ private boolean ensureTempPathExists(FileSystem fs, String lastCommitTime) throw throw new HoodieException("Could not delete existing " + targetPath); } } - log.info("Creating " + targetPath + " with permission drwxrwxrwx"); + LOG.info("Creating " + targetPath + " with permission drwxrwxrwx"); return FileSystem.mkdirs(fs, targetBaseDirPath, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); } @@ -315,20 +315,20 @@ private String getLastCommitTimePulled(FileSystem fs, String sourceTableLocation .findInstantsAfter(config.fromCommitTime, config.maxCommits).getInstants().map(HoodieInstant::getTimestamp) .collect(Collectors.toList()); if (commitsToSync.isEmpty()) { - log.warn( + LOG.warn( "Nothing to sync. All commits in " + config.sourceTable + " are " + metadata.getActiveTimeline().getCommitsTimeline() .filterCompletedInstants().getInstants().collect(Collectors.toList()) + " and from commit time is " + config.fromCommitTime); return null; } - log.info("Syncing commits " + commitsToSync); + LOG.info("Syncing commits " + commitsToSync); return commitsToSync.get(commitsToSync.size() - 1); } private Connection getConnection() throws SQLException { if (connection == null) { - log.info("Getting Hive Connection to " + config.hiveJDBCUrl); + LOG.info("Getting Hive Connection to " + config.hiveJDBCUrl); this.connection = DriverManager.getConnection(config.hiveJDBCUrl, config.hiveUsername, config.hivePassword); } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java index 6afcc7a7d127b..9185d9730244c 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java @@ -38,7 +38,7 @@ public class HoodieCleaner { - private static volatile Logger log = LogManager.getLogger(HoodieCleaner.class); + private static final Logger LOG = LogManager.getLogger(HoodieCleaner.class); /** * Config for Cleaner. @@ -66,7 +66,7 @@ public HoodieCleaner(Config cfg, JavaSparkContext jssc) throws IOException { this.fs = FSUtils.getFs(cfg.basePath, jssc.hadoopConfiguration()); this.props = cfg.propsFilePath == null ? 
UtilHelpers.buildProperties(cfg.configs) : UtilHelpers.readConfig(fs, new Path(cfg.propsFilePath), cfg.configs).getConfig(); - log.info("Creating Cleaner with configs : " + props.toString()); + LOG.info("Creating Cleaner with configs : " + props.toString()); } public void run() throws Exception { diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java index 48fbbafaca35c..4ace07c240169 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java @@ -39,7 +39,7 @@ public class HoodieCompactor { - private static volatile Logger logger = LogManager.getLogger(HoodieCompactor.class); + private static final Logger LOG = LogManager.getLogger(HoodieCompactor.class); private final Config cfg; private transient FileSystem fs; private TypedProperties props; @@ -110,7 +110,7 @@ public int compact(JavaSparkContext jsc, int retry) { } } while (ret != 0 && retry-- > 0); } catch (Throwable t) { - logger.error(t); + LOG.error(t); } return ret; } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java index 4f76f43f7ef53..06fc5de3af7b1 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java @@ -54,7 +54,7 @@ */ public class HoodieSnapshotCopier implements Serializable { - private static Logger logger = LogManager.getLogger(HoodieSnapshotCopier.class); + private static final Logger LOG = LogManager.getLogger(HoodieSnapshotCopier.class); static class Config implements Serializable { @@ -79,21 +79,21 @@ public void snapshot(JavaSparkContext jsc, String baseDir, final String outputDi Option latestCommit = tableMetadata.getActiveTimeline().getCommitsTimeline().filterCompletedInstants().lastInstant(); if (!latestCommit.isPresent()) { - logger.warn("No commits present. Nothing to snapshot"); + LOG.warn("No commits present. 
Nothing to snapshot"); return; } final String latestCommitTimestamp = latestCommit.get().getTimestamp(); - logger.info(String.format("Starting to snapshot latest version files which are also no-late-than %s.", + LOG.info(String.format("Starting to snapshot latest version files which are also no-late-than %s.", latestCommitTimestamp)); List partitions = FSUtils.getAllPartitionPaths(fs, baseDir, shouldAssumeDatePartitioning); if (partitions.size() > 0) { - logger.info(String.format("The job needs to copy %d partitions.", partitions.size())); + LOG.info(String.format("The job needs to copy %d partitions.", partitions.size())); // Make sure the output directory is empty Path outputPath = new Path(outputDir); if (fs.exists(outputPath)) { - logger.warn(String.format("The output path %s targetBasePath already exists, deleting", outputPath)); + LOG.warn(String.format("The output path %s targetBasePath already exists, deleting", outputPath)); fs.delete(new Path(outputDir), true); } @@ -126,7 +126,7 @@ public void snapshot(JavaSparkContext jsc, String baseDir, final String outputDi }); // Also copy the .commit files - logger.info(String.format("Copying .commit files which are no-late-than %s.", latestCommitTimestamp)); + LOG.info(String.format("Copying .commit files which are no-late-than %s.", latestCommitTimestamp)); FileStatus[] commitFilesToCopy = fs.listStatus(new Path(baseDir + "/" + HoodieTableMetaClient.METAFOLDER_NAME), (commitFilePath) -> { if (commitFilePath.getName().equals(HoodieTableConfig.HOODIE_PROPERTIES_FILE)) { @@ -144,19 +144,19 @@ public void snapshot(JavaSparkContext jsc, String baseDir, final String outputDi fs.mkdirs(targetFilePath.getParent()); } if (fs.exists(targetFilePath)) { - logger.error( + LOG.error( String.format("The target output commit file (%s targetBasePath) already exists.", targetFilePath)); } FileUtil.copy(fs, commitStatus.getPath(), fs, targetFilePath, false, fs.getConf()); } } else { - logger.info("The job has 0 partition to copy."); + LOG.info("The job has 0 partition to copy."); } // Create the _SUCCESS tag Path successTagPath = new Path(outputDir + "/_SUCCESS"); if (!fs.exists(successTagPath)) { - logger.info(String.format("Creating _SUCCESS under targetBasePath: $s", outputDir)); + LOG.info(String.format("Creating _SUCCESS under targetBasePath: $s", outputDir)); fs.createNewFile(successTagPath); } } @@ -165,14 +165,14 @@ public static void main(String[] args) throws IOException { // Take input configs final Config cfg = new Config(); new JCommander(cfg, args); - logger.info(String.format("Snapshot hoodie table from %s targetBasePath to %stargetBasePath", cfg.basePath, + LOG.info(String.format("Snapshot hoodie table from %s targetBasePath to %stargetBasePath", cfg.basePath, cfg.outputPath)); // Create a spark job to do the snapshot copy SparkConf sparkConf = new SparkConf().setAppName("Hoodie-snapshot-copier"); sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer"); JavaSparkContext jsc = new JavaSparkContext(sparkConf); - logger.info("Initializing spark job."); + LOG.info("Initializing spark job."); // Copy HoodieSnapshotCopier copier = new HoodieSnapshotCopier(); diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java index 663ce0fa97aaf..218934a580519 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/UtilHelpers.java @@ -58,7 +58,7 
@@ * Bunch of helper methods. */ public class UtilHelpers { - private static Logger logger = LogManager.getLogger(UtilHelpers.class); + private static final Logger LOG = LogManager.getLogger(UtilHelpers.class); public static Source createSource(String sourceClass, TypedProperties cfg, JavaSparkContext jssc, SparkSession sparkSession, SchemaProvider schemaProvider) throws IOException { @@ -95,7 +95,7 @@ public static DFSPropertiesConfiguration readConfig(FileSystem fs, Path cfgPath, try { DFSPropertiesConfiguration conf = new DFSPropertiesConfiguration(cfgPath.getFileSystem(fs.getConf()), cfgPath); if (!overriddenProps.isEmpty()) { - logger.info("Adding overridden properties to file properties."); + LOG.info("Adding overridden properties to file properties."); conf.addProperties(new BufferedReader(new StringReader(String.join("\n", overriddenProps)))); } return conf; @@ -206,14 +206,14 @@ public static int handleErrors(JavaSparkContext jsc, String instantTime, JavaRDD writeResponse.foreach(writeStatus -> { if (writeStatus.hasErrors()) { errors.add(1); - logger.error(String.format("Error processing records :writeStatus:%s", writeStatus.getStat().toString())); + LOG.error(String.format("Error processing records :writeStatus:%s", writeStatus.getStat().toString())); } }); if (errors.value() == 0) { - logger.info(String.format("Dataset imported into hoodie dataset with %s instant time.", instantTime)); + LOG.info(String.format("Dataset imported into hoodie dataset with %s instant time.", instantTime)); return 0; } - logger.error(String.format("Import failed with %d errors.", errors.value())); + LOG.error(String.format("Import failed with %d errors.", errors.value())); return -1; } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java index bfbcf71f0a99a..604043797b879 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/adhoc/UpgradePayloadFromUberToApache.java @@ -46,7 +46,7 @@ */ public class UpgradePayloadFromUberToApache implements Serializable { - private static Logger logger = LogManager.getLogger(UpgradePayloadFromUberToApache.class); + private static final Logger LOG = LogManager.getLogger(UpgradePayloadFromUberToApache.class); private final Config cfg; @@ -59,13 +59,13 @@ public void run() throws IOException { try (BufferedReader reader = new BufferedReader(new FileReader(cfg.inputPath))) { basePath = reader.readLine(); } catch (IOException e) { - logger.error("Read from path: " + cfg.inputPath + " error.", e); + LOG.error("Read from path: " + cfg.inputPath + " error.", e); } while (basePath != null) { basePath = basePath.trim(); if (!basePath.startsWith("#")) { - logger.info("Performing upgrade for " + basePath); + LOG.info("Performing upgrade for " + basePath); String metaPath = String.format("%s/.hoodie", basePath); HoodieTableMetaClient metaClient = new HoodieTableMetaClient(FSUtils.prepareHadoopConf(new Configuration()), basePath, false); @@ -74,20 +74,20 @@ public void run() throws IOException { Map propsMap = tableConfig.getProps(); if (propsMap.containsKey(HoodieCompactionConfig.PAYLOAD_CLASS_PROP)) { String payloadClass = propsMap.get(HoodieCompactionConfig.PAYLOAD_CLASS_PROP); - logger.info("Found payload class=" + payloadClass); + LOG.info("Found payload class=" + payloadClass); if 
(payloadClass.startsWith("com.uber.hoodie")) { String newPayloadClass = payloadClass.replace("com.uber.hoodie", "org.apache.hudi"); - logger.info("Replacing payload class (" + payloadClass + ") with (" + newPayloadClass + ")"); + LOG.info("Replacing payload class (" + payloadClass + ") with (" + newPayloadClass + ")"); Map newPropsMap = new HashMap<>(propsMap); newPropsMap.put(HoodieCompactionConfig.PAYLOAD_CLASS_PROP, newPayloadClass); Properties props = new Properties(); props.putAll(newPropsMap); HoodieTableConfig.createHoodieProperties(metaClient.getFs(), new Path(metaPath), props); - logger.info("Finished upgrade for " + basePath); + LOG.info("Finished upgrade for " + basePath); } } } else { - logger.info("Skipping as this table is COW table. BasePath=" + basePath); + LOG.info("Skipping as this table is COW table. BasePath=" + basePath); } } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java index b6f5306afab54..294b634e22313 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/AbstractDeltaStreamerService.java @@ -36,7 +36,7 @@ */ public abstract class AbstractDeltaStreamerService implements Serializable { - protected static volatile Logger log = LogManager.getLogger(AbstractDeltaStreamerService.class); + private static final Logger LOG = LogManager.getLogger(AbstractDeltaStreamerService.class); // Flag to track if the service is started. private boolean started; @@ -71,7 +71,7 @@ void waitForShutdown() throws ExecutionException, InterruptedException { try { future.get(); } catch (ExecutionException ex) { - log.error("Service shutdown with error", ex); + LOG.error("Service shutdown with error", ex); throw ex; } } @@ -94,7 +94,7 @@ void shutdown(boolean force) { // Wait for some max time after requesting shutdown executor.awaitTermination(24, TimeUnit.HOURS); } catch (InterruptedException ie) { - log.error("Interrupted while waiting for shutdown", ie); + LOG.error("Interrupted while waiting for shutdown", ie); } } } @@ -128,18 +128,18 @@ public void start(Function onShutdownCallback) { * @param onShutdownCallback */ private void monitorThreads(Function onShutdownCallback) { - log.info("Submitting monitor thread !!"); + LOG.info("Submitting monitor thread !!"); Executors.newSingleThreadExecutor().submit(() -> { boolean error = false; try { - log.info("Monitoring thread(s) !!"); + LOG.info("Monitoring thread(s) !!"); future.get(); } catch (ExecutionException ex) { - log.error("Monitor noticed one or more threads failed." + " Requesting graceful shutdown of other threads", ex); + LOG.error("Monitor noticed one or more threads failed." 
+ " Requesting graceful shutdown of other threads", ex); error = true; shutdown(false); } catch (InterruptedException ie) { - log.error("Got interrupted Monitoring threads", ie); + LOG.error("Got interrupted Monitoring threads", ie); error = true; shutdown(false); } finally { diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java index 140081aeda611..aa5d892338cff 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/Compactor.java @@ -37,7 +37,7 @@ */ public class Compactor implements Serializable { - protected static volatile Logger log = LogManager.getLogger(Compactor.class); + private static final Logger LOG = LogManager.getLogger(Compactor.class); private transient HoodieWriteClient compactionClient; private transient JavaSparkContext jssc; @@ -48,12 +48,12 @@ public Compactor(HoodieWriteClient compactionClient, JavaSparkContext jssc) { } public void compact(HoodieInstant instant) throws IOException { - log.info("Compactor executing compaction " + instant); + LOG.info("Compactor executing compaction " + instant); JavaRDD res = compactionClient.compact(instant.getTimestamp()); long numWriteErrors = res.collect().stream().filter(r -> r.hasErrors()).count(); if (numWriteErrors != 0) { // We treat even a single error in compaction as fatal - log.error("Compaction for instant (" + instant + ") failed with write errors. " + "Errors :" + numWriteErrors); + LOG.error("Compaction for instant (" + instant + ") failed with write errors. " + "Errors :" + numWriteErrors); throw new HoodieException( "Compaction for instant (" + instant + ") failed with write errors. 
" + "Errors :" + numWriteErrors); } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java index 91a9bc60f5923..d142fa48c2991 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/DeltaSync.java @@ -83,7 +83,7 @@ */ public class DeltaSync implements Serializable { - protected static volatile Logger log = LogManager.getLogger(DeltaSync.class); + private static final Logger LOG = LogManager.getLogger(DeltaSync.class); public static String CHECKPOINT_KEY = "deltastreamer.checkpoint.key"; public static String CHECKPOINT_RESET_KEY = "deltastreamer.checkpoint.reset_key"; @@ -168,7 +168,7 @@ public DeltaSync(HoodieDeltaStreamer.Config cfg, SparkSession sparkSession, Sche this.tableType = tableType; this.onInitializingHoodieWriteClient = onInitializingHoodieWriteClient; this.props = props; - log.info("Creating delta streamer with configs : " + props.toString()); + LOG.info("Creating delta streamer with configs : " + props.toString()); this.schemaProvider = schemaProvider; refreshTimeline(); @@ -266,7 +266,7 @@ private Pair>> readFromSource if (!resumeCheckpointStr.isPresent() && cfg.checkpoint != null) { resumeCheckpointStr = Option.of(cfg.checkpoint); } - log.info("Checkpoint to resume from : " + resumeCheckpointStr); + LOG.info("Checkpoint to resume from : " + resumeCheckpointStr); final Option> avroRDDOptional; final String checkpointStr; @@ -300,13 +300,13 @@ private Pair>> readFromSource } if (Objects.equals(checkpointStr, resumeCheckpointStr.orElse(null))) { - log.info("No new data, source checkpoint has not changed. Nothing to commit." + "Old checkpoint=(" + LOG.info("No new data, source checkpoint has not changed. Nothing to commit." + "Old checkpoint=(" + resumeCheckpointStr + "). New Checkpoint=(" + checkpointStr + ")"); return null; } if ((!avroRDDOptional.isPresent()) || (avroRDDOptional.get().isEmpty())) { - log.info("No new data, perform empty commit."); + LOG.info("No new data, perform empty commit."); return Pair.of(schemaProvider, Pair.of(checkpointStr, jssc.emptyRDD())); } @@ -342,7 +342,7 @@ private Option writeToSink(JavaRDD records, String checkpo boolean isEmpty = records.isEmpty(); String commitTime = startCommit(); - log.info("Starting commit : " + commitTime); + LOG.info("Starting commit : " + commitTime); JavaRDD writeStatusRDD; if (cfg.operation == Operation.INSERT) { @@ -367,13 +367,13 @@ private Option writeToSink(JavaRDD records, String checkpo } if (hasErrors) { - log.warn("Some records failed to be merged but forcing commit since commitOnErrors set. Errors/Total=" + LOG.warn("Some records failed to be merged but forcing commit since commitOnErrors set. Errors/Total=" + totalErrorRecords + "/" + totalRecords); } boolean success = writeClient.commit(commitTime, writeStatusRDD, Option.of(checkpointCommitMetadata)); if (success) { - log.info("Commit " + commitTime + " successful!"); + LOG.info("Commit " + commitTime + " successful!"); // Schedule compaction if needed if (cfg.isAsyncCompactionEnabled()) { @@ -387,16 +387,16 @@ private Option writeToSink(JavaRDD records, String checkpo hiveSyncTimeMs = hiveSyncContext != null ? 
hiveSyncContext.stop() : 0; } } else { - log.info("Commit " + commitTime + " failed!"); + LOG.info("Commit " + commitTime + " failed!"); throw new HoodieException("Commit " + commitTime + " failed!"); } } else { - log.error("Delta Sync found errors when writing. Errors/Total=" + totalErrorRecords + "/" + totalRecords); - log.error("Printing out the top 100 errors"); + LOG.error("Delta Sync found errors when writing. Errors/Total=" + totalErrorRecords + "/" + totalRecords); + LOG.error("Printing out the top 100 errors"); writeStatusRDD.filter(ws -> ws.hasErrors()).take(100).forEach(ws -> { - log.error("Global error :", ws.getGlobalError()); + LOG.error("Global error :", ws.getGlobalError()); if (ws.getErrors().size() > 0) { - ws.getErrors().entrySet().forEach(r -> log.trace("Error for key:" + r.getKey() + " is " + r.getValue())); + ws.getErrors().entrySet().forEach(r -> LOG.trace("Error for key:" + r.getKey() + " is " + r.getValue())); } }); // Rolling back instant @@ -420,7 +420,7 @@ private String startCommit() { return writeClient.startCommit(); } catch (IllegalArgumentException ie) { lastException = ie; - log.error("Got error trying to start a new commit. Retrying after sleeping for a sec", ie); + LOG.error("Got error trying to start a new commit. Retrying after sleeping for a sec", ie); retryNum++; try { Thread.sleep(1000); @@ -438,7 +438,7 @@ private String startCommit() { private void syncHive() throws ClassNotFoundException { if (cfg.enableHiveSync) { HiveSyncConfig hiveSyncConfig = DataSourceUtils.buildHiveSyncConfig(props, cfg.targetBasePath); - log.info("Syncing target hoodie table with hive table(" + hiveSyncConfig.tableName + "). Hive metastore URL :" + LOG.info("Syncing target hoodie table with hive table(" + hiveSyncConfig.tableName + "). Hive metastore URL :" + hiveSyncConfig.jdbcUrl + ", basePath :" + cfg.targetBasePath); new HiveSyncTool(hiveSyncConfig, hiveConf, fs).syncHoodieTable(); @@ -451,7 +451,7 @@ private void syncHive() throws ClassNotFoundException { * this constraint. 
*/ public void setupWriteClient() { - log.info("Setting up Hoodie Write Client"); + LOG.info("Setting up Hoodie Write Client"); if ((null != schemaProvider) && (null == writeClient)) { registerAvroSchemas(schemaProvider); HoodieWriteConfig hoodieCfg = getHoodieClientConfig(schemaProvider); @@ -503,7 +503,7 @@ private void registerAvroSchemas(SchemaProvider schemaProvider) { schemas.add(schemaProvider.getTargetSchema()); } - log.info("Registering Schema :" + schemas); + LOG.info("Registering Schema :" + schemas); jssc.sc().getConf().registerAvroSchemas(JavaConversions.asScalaBuffer(schemas).toList()); } } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java index baef2ea02e8b4..480956a9aaef5 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/HoodieDeltaStreamer.java @@ -78,7 +78,7 @@ */ public class HoodieDeltaStreamer implements Serializable { - private static volatile Logger log = LogManager.getLogger(HoodieDeltaStreamer.class); + private static final Logger LOG = LogManager.getLogger(HoodieDeltaStreamer.class); public static String CHECKPOINT_KEY = "deltastreamer.checkpoint.key"; @@ -115,23 +115,23 @@ public void sync() throws Exception { if (cfg.continuousMode) { deltaSyncService.start(this::onDeltaSyncShutdown); deltaSyncService.waitForShutdown(); - log.info("Delta Sync shutting down"); + LOG.info("Delta Sync shutting down"); } else { - log.info("Delta Streamer running only single round"); + LOG.info("Delta Streamer running only single round"); try { deltaSyncService.getDeltaSync().syncOnce(); } catch (Exception ex) { - log.error("Got error running delta sync once. Shutting down", ex); + LOG.error("Got error running delta sync once. Shutting down", ex); throw ex; } finally { deltaSyncService.close(); - log.info("Shut down deltastreamer"); + LOG.info("Shut down deltastreamer"); } } } private boolean onDeltaSyncShutdown(boolean error) { - log.info("DeltaSync shutdown. Closing write client. Error?" + error); + LOG.info("DeltaSync shutdown. Closing write client. Error?" + error); deltaSyncService.close(); return true; } @@ -363,7 +363,7 @@ public DeltaSyncService(HoodieDeltaStreamer.Config cfg, JavaSparkContext jssc, F } this.props = UtilHelpers.readConfig(fs, new Path(cfg.propsFilePath), cfg.configs).getConfig(); - log.info("Creating delta streamer with configs : " + props.toString()); + LOG.info("Creating delta streamer with configs : " + props.toString()); this.schemaProvider = UtilHelpers.createSchemaProvider(cfg.schemaProviderClassName, props, jssc); if (cfg.filterDupes) { @@ -385,7 +385,7 @@ protected Pair startService() { boolean error = false; if (cfg.isAsyncCompactionEnabled()) { // set Scheduler Pool. 
- log.info("Setting Spark Pool name for delta-sync to " + SchedulerConfGenerator.DELTASYNC_POOL_NAME); + LOG.info("Setting Spark Pool name for delta-sync to " + SchedulerConfGenerator.DELTASYNC_POOL_NAME); jssc.setLocalProperty("spark.scheduler.pool", SchedulerConfGenerator.DELTASYNC_POOL_NAME); } try { @@ -394,19 +394,19 @@ protected Pair startService() { long start = System.currentTimeMillis(); Option scheduledCompactionInstant = deltaSync.syncOnce(); if (scheduledCompactionInstant.isPresent()) { - log.info("Enqueuing new pending compaction instant (" + scheduledCompactionInstant + ")"); + LOG.info("Enqueuing new pending compaction instant (" + scheduledCompactionInstant + ")"); asyncCompactService.enqueuePendingCompaction(new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, scheduledCompactionInstant.get())); asyncCompactService.waitTillPendingCompactionsReducesTo(cfg.maxPendingCompactions); } long toSleepMs = cfg.minSyncIntervalSeconds * 1000 - (System.currentTimeMillis() - start); if (toSleepMs > 0) { - log.info("Last sync ran less than min sync interval: " + cfg.minSyncIntervalSeconds + " s, sleep: " + LOG.info("Last sync ran less than min sync interval: " + cfg.minSyncIntervalSeconds + " s, sleep: " + toSleepMs + " ms."); Thread.sleep(toSleepMs); } } catch (Exception e) { - log.error("Shutting down delta-sync due to exception", e); + LOG.error("Shutting down delta-sync due to exception", e); error = true; throw new HoodieException(e.getMessage(), e); } @@ -422,9 +422,9 @@ protected Pair startService() { * Shutdown compactor as DeltaSync is shutdown. */ private void shutdownCompactor(boolean error) { - log.info("Delta Sync shutdown. Error ?" + error); + LOG.info("Delta Sync shutdown. Error ?" + error); if (asyncCompactService != null) { - log.warn("Gracefully shutting down compactor"); + LOG.warn("Gracefully shutting down compactor"); asyncCompactService.shutdown(false); } } @@ -537,7 +537,7 @@ public void waitTillPendingCompactionsReducesTo(int numPendingCompactions) throw * @throws InterruptedException */ private HoodieInstant fetchNextCompactionInstant() throws InterruptedException { - log.info("Compactor waiting for next instant for compaction upto 60 seconds"); + LOG.info("Compactor waiting for next instant for compaction upto 60 seconds"); HoodieInstant instant = pendingCompactions.poll(60, TimeUnit.SECONDS); if (instant != null) { try { @@ -560,7 +560,7 @@ protected Pair startService() { IntStream.range(0, maxConcurrentCompaction).mapToObj(i -> CompletableFuture.supplyAsync(() -> { try { // Set Compactor Pool Name for allowing users to prioritize compaction - log.info("Setting Spark Pool name for compaction to " + SchedulerConfGenerator.COMPACT_POOL_NAME); + LOG.info("Setting Spark Pool name for compaction to " + SchedulerConfGenerator.COMPACT_POOL_NAME); jssc.setLocalProperty("spark.scheduler.pool", SchedulerConfGenerator.COMPACT_POOL_NAME); while (!isShutdownRequested()) { @@ -569,11 +569,11 @@ protected Pair startService() { compactor.compact(instant); } } - log.info("Compactor shutting down properly!!"); + LOG.info("Compactor shutting down properly!!"); } catch (InterruptedException ie) { - log.warn("Compactor executor thread got interrupted exception. Stopping", ie); + LOG.warn("Compactor executor thread got interrupted exception. 
Stopping", ie); } catch (IOException e) { - log.error("Compactor executor failed", e); + LOG.error("Compactor executor failed", e); throw new HoodieIOException(e.getMessage(), e); } return true; diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java index 1e754ba203261..c2abe668a9bba 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/deltastreamer/SchedulerConfGenerator.java @@ -39,7 +39,7 @@ */ public class SchedulerConfGenerator { - protected static volatile Logger log = LogManager.getLogger(SchedulerConfGenerator.class); + private static final Logger LOG = LogManager.getLogger(SchedulerConfGenerator.class); public static final String DELTASYNC_POOL_NAME = "hoodiedeltasync"; public static final String COMPACT_POOL_NAME = "hoodiecompact"; @@ -76,7 +76,7 @@ public static Map getSparkSchedulingConfigs(HoodieDeltaStreamer. cfg.compactSchedulingWeight, cfg.deltaSyncSchedulingMinShare, cfg.compactSchedulingMinShare); additionalSparkConfigs.put(SPARK_SCHEDULER_ALLOCATION_FILE_KEY, sparkSchedulingConfFile); } else { - log.warn("Job Scheduling Configs will not be in effect as spark.scheduler.mode " + LOG.warn("Job Scheduling Configs will not be in effect as spark.scheduler.mode " + "is not set to FAIR at instatiation time. Continuing without scheduling configs"); } return additionalSparkConfigs; @@ -88,7 +88,7 @@ private static String generateAndStoreConfig(Integer deltaSyncWeight, Integer co BufferedWriter bw = new BufferedWriter(new FileWriter(tempConfigFile)); bw.write(generateConfig(deltaSyncWeight, compactionWeight, deltaSyncMinShare, compactionMinShare)); bw.close(); - log.info("Configs written to file" + tempConfigFile.getAbsolutePath()); + LOG.info("Configs written to file" + tempConfigFile.getAbsolutePath()); return tempConfigFile.getAbsolutePath(); } } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java index a840ff4a2229d..1108f6579f60b 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java @@ -58,7 +58,7 @@ public class TimelineServerPerf implements Serializable { - private static volatile Logger logger = LogManager.getLogger(TimelineServerPerf.class); + private static final Logger LOG = LogManager.getLogger(TimelineServerPerf.class); private final Config cfg; private transient TimelineService timelineServer; private final boolean useExternalTimelineServer; @@ -73,10 +73,10 @@ public TimelineServerPerf(Config cfg) throws IOException { private void setHostAddrFromSparkConf(SparkConf sparkConf) { String hostAddr = sparkConf.get("spark.driver.host", null); if (hostAddr != null) { - logger.info("Overriding hostIp to (" + hostAddr + ") found in spark-conf. It was " + this.hostAddr); + LOG.info("Overriding hostIp to (" + hostAddr + ") found in spark-conf. 
It was " + this.hostAddr); this.hostAddr = hostAddr; } else { - logger.warn("Unable to find driver bind address from spark config"); + LOG.warn("Unable to find driver bind address from spark config"); } } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java index da7d016cb4dd4..18ebff419717a 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/AvroKafkaSource.java @@ -40,7 +40,7 @@ */ public class AvroKafkaSource extends AvroSource { - private static Logger log = LogManager.getLogger(AvroKafkaSource.class); + private static final Logger LOG = LogManager.getLogger(AvroKafkaSource.class); private final KafkaOffsetGen offsetGen; @@ -57,7 +57,7 @@ protected InputBatch> fetchNewData(Option lastChe if (totalNewMsgs <= 0) { return new InputBatch<>(Option.empty(), lastCheckpointStr.isPresent() ? lastCheckpointStr.get() : ""); } else { - log.info("About to read " + totalNewMsgs + " from Kafka for topic :" + offsetGen.getTopicName()); + LOG.info("About to read " + totalNewMsgs + " from Kafka for topic :" + offsetGen.getTopicName()); } JavaRDD newDataRDD = toRDD(offsetRanges); return new InputBatch<>(Option.of(newDataRDD), KafkaOffsetGen.CheckpointUtils.offsetsToStr(offsetRanges)); diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java index 2b54b452ee113..666c2606bb2ab 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HiveIncrPullSource.java @@ -59,7 +59,7 @@ */ public class HiveIncrPullSource extends AvroSource { - private static volatile Logger log = LogManager.getLogger(HiveIncrPullSource.class); + private static final Logger LOG = LogManager.getLogger(HiveIncrPullSource.class); private final transient FileSystem fs; @@ -86,7 +86,7 @@ public HiveIncrPullSource(TypedProperties props, JavaSparkContext sparkContext, */ private Option findCommitToPull(Option latestTargetCommit) throws IOException { - log.info("Looking for commits "); + LOG.info("Looking for commits "); FileStatus[] commitTimePaths = fs.listStatus(new Path(incrPullRootPath)); List commitTimes = new ArrayList<>(commitTimePaths.length); @@ -95,7 +95,7 @@ private Option findCommitToPull(Option latestTargetCommit) throw commitTimes.add(splits[splits.length - 1]); } Collections.sort(commitTimes); - log.info("Retrieved commit times " + commitTimes); + LOG.info("Retrieved commit times " + commitTimes); if (!latestTargetCommit.isPresent()) { // start from the beginning diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java index 67de9c2a8a249..888eec70207d4 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/HoodieIncrSource.java @@ -28,6 +28,8 @@ import org.apache.hudi.utilities.schema.SchemaProvider; import org.apache.hudi.utilities.sources.helpers.IncrSourceHelper; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.apache.spark.api.java.JavaSparkContext; import 
org.apache.spark.sql.DataFrameReader; import org.apache.spark.sql.Dataset; @@ -37,6 +39,9 @@ import java.util.Arrays; public class HoodieIncrSource extends RowSource { + + private static final Logger LOG = LogManager.getLogger(HoodieIncrSource.class); + protected static class Config { /** @@ -104,7 +109,7 @@ public Pair>, String> fetchNextBatch(Option lastCkpt numInstantsPerFetch, beginInstant, readLatestOnMissingCkpt); if (instantEndpts.getKey().equals(instantEndpts.getValue())) { - log.warn("Already caught up. Begin Checkpoint was :" + instantEndpts.getKey()); + LOG.warn("Already caught up. Begin Checkpoint was :" + instantEndpts.getKey()); return Pair.of(Option.empty(), instantEndpts.getKey()); } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java index 0da89f90c101a..bd922ac6bd0c3 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/JsonKafkaSource.java @@ -38,7 +38,7 @@ */ public class JsonKafkaSource extends JsonSource { - private static Logger log = LogManager.getLogger(JsonKafkaSource.class); + private static final Logger LOG = LogManager.getLogger(JsonKafkaSource.class); private final KafkaOffsetGen offsetGen; @@ -55,7 +55,7 @@ protected InputBatch> fetchNewData(Option lastCheckpoint if (totalNewMsgs <= 0) { return new InputBatch<>(Option.empty(), lastCheckpointStr.isPresent() ? lastCheckpointStr.get() : ""); } - log.info("About to read " + totalNewMsgs + " from Kafka for topic :" + offsetGen.getTopicName()); + LOG.info("About to read " + totalNewMsgs + " from Kafka for topic :" + offsetGen.getTopicName()); JavaRDD newDataRDD = toRDD(offsetRanges); return new InputBatch<>(Option.of(newDataRDD), CheckpointUtils.offsetsToStr(offsetRanges)); } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java index c724c991c7aa3..2afe8bbfe2c32 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java @@ -33,7 +33,7 @@ * Represents a source from which we can tail data. Assumes a constructor that takes properties. 
*/ public abstract class Source implements Serializable { - protected static volatile Logger log = LogManager.getLogger(Source.class); + private static final Logger LOG = LogManager.getLogger(Source.class); public enum SourceType { JSON, AVRO, ROW, PARQUET diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java index a4e82ddd3738c..c17a5cff7c6ee 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java @@ -51,7 +51,7 @@ */ public class KafkaOffsetGen { - private static volatile Logger log = LogManager.getLogger(KafkaOffsetGen.class); + private static final Logger LOG = LogManager.getLogger(KafkaOffsetGen.class); public static class CheckpointUtils { diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java index 8bfa7bf4f0c06..aabcb73ae4a10 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/FlatteningTransformer.java @@ -37,7 +37,7 @@ public class FlatteningTransformer implements Transformer { private static final String TMP_TABLE = "HUDI_SRC_TMP_TABLE_"; - private static volatile Logger log = LogManager.getLogger(SqlQueryBasedTransformer.class); + private static final Logger LOG = LogManager.getLogger(SqlQueryBasedTransformer.class); /** * Configs supported. @@ -48,7 +48,7 @@ public Dataset apply(JavaSparkContext jsc, SparkSession sparkSession, Datas // tmp table name doesn't like dashes String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_")); - log.info("Registering tmp table : " + tmpTable); + LOG.info("Registering tmp table : " + tmpTable); rowDataset.registerTempTable(tmpTable); return sparkSession.sql("select " + flattenSchema(rowDataset.schema(), null) + " from " + tmpTable); } diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java index d7ec911f75033..8210fb14bc11e 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/transform/SqlQueryBasedTransformer.java @@ -36,7 +36,7 @@ */ public class SqlQueryBasedTransformer implements Transformer { - private static volatile Logger log = LogManager.getLogger(SqlQueryBasedTransformer.class); + private static final Logger LOG = LogManager.getLogger(SqlQueryBasedTransformer.class); private static final String SRC_PATTERN = ""; private static final String TMP_TABLE = "HOODIE_SRC_TMP_TABLE_"; @@ -59,10 +59,10 @@ public Dataset apply(JavaSparkContext jsc, SparkSession sparkSession, Datas // tmp table name doesn't like dashes String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_")); - log.info("Registering tmp table : " + tmpTable); + LOG.info("Registering tmp table : " + tmpTable); rowDataset.registerTempTable(tmpTable); String sqlStr = transformerSQL.replaceAll(SRC_PATTERN, tmpTable); - log.info("SQL Query for transformation : (" + sqlStr + ")"); + LOG.info("SQL Query for transformation : (" + 
sqlStr + ")"); return sparkSession.sql(sqlStr); } } diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java index f4e39ae937168..4205144a4409c 100644 --- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java +++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieDeltaStreamer.java @@ -89,7 +89,7 @@ public class TestHoodieDeltaStreamer extends UtilitiesTestBase { private static final String PROPS_FILENAME_TEST_SOURCE = "test-source.properties"; private static final String PROPS_FILENAME_TEST_INVALID = "test-invalid.properties"; - private static volatile Logger log = LogManager.getLogger(TestHoodieDeltaStreamer.class); + private static final Logger LOG = LogManager.getLogger(TestHoodieDeltaStreamer.class); @BeforeClass public static void initClass() throws Exception { @@ -247,7 +247,7 @@ static void assertDistanceCountWithExactValue(long expected, String datasetPath, static void assertAtleastNCompactionCommits(int minExpected, String datasetPath, FileSystem fs) { HoodieTableMetaClient meta = new HoodieTableMetaClient(fs.getConf(), datasetPath); HoodieTimeline timeline = meta.getActiveTimeline().getCommitTimeline().filterCompletedInstants(); - log.info("Timeline Instants=" + meta.getActiveTimeline().getInstants().collect(Collectors.toList())); + LOG.info("Timeline Instants=" + meta.getActiveTimeline().getInstants().collect(Collectors.toList())); int numCompactionCommits = (int) timeline.getInstants().count(); assertTrue("Got=" + numCompactionCommits + ", exp >=" + minExpected, minExpected <= numCompactionCommits); } @@ -255,7 +255,7 @@ static void assertAtleastNCompactionCommits(int minExpected, String datasetPath, static void assertAtleastNDeltaCommits(int minExpected, String datasetPath, FileSystem fs) { HoodieTableMetaClient meta = new HoodieTableMetaClient(fs.getConf(), datasetPath); HoodieTimeline timeline = meta.getActiveTimeline().getDeltaCommitTimeline().filterCompletedInstants(); - log.info("Timeline Instants=" + meta.getActiveTimeline().getInstants().collect(Collectors.toList())); + LOG.info("Timeline Instants=" + meta.getActiveTimeline().getInstants().collect(Collectors.toList())); int numDeltaCommits = (int) timeline.getInstants().count(); assertTrue("Got=" + numDeltaCommits + ", exp >=" + minExpected, minExpected <= numDeltaCommits); } @@ -280,7 +280,7 @@ static void waitTillCondition(Function condition, long timeout Thread.sleep(3000); ret = condition.apply(true); } catch (Throwable error) { - log.warn("Got error :", error); + LOG.warn("Got error :", error); ret = false; } } @@ -311,7 +311,7 @@ public void testPropsWithInvalidKeyGenerator() throws Exception { fail("Should error out when setting the key generator class property to an invalid value"); } catch (IOException e) { // expected - log.error("Expected error during getting the key generator", e); + LOG.error("Expected error during getting the key generator", e); assertTrue(e.getMessage().contains("Could not load key generator class")); } } @@ -326,7 +326,7 @@ public void testDatasetCreation() throws Exception { fail("Should error out when pointed out at a dir thats not a dataset"); } catch (DatasetNotFoundException e) { // expected - log.error("Expected error during dataset creation", e); + LOG.error("Expected error during dataset creation", e); } } @@ -497,7 +497,7 @@ public void testNullSchemaProvider() throws Exception { new 
HoodieDeltaStreamer(cfg, jsc, dfs, hiveServer.getHiveConf()).sync(); fail("Should error out when schema provider is not provided"); } catch (HoodieException e) { - log.error("Expected error during reading data from source ", e); + LOG.error("Expected error during reading data from source ", e); assertTrue(e.getMessage().contains("Please provide a valid schema provider class!")); } } diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/AbstractBaseTestSource.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/AbstractBaseTestSource.java index 3dc6b331a611f..745b0f013a25c 100644 --- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/AbstractBaseTestSource.java +++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/AbstractBaseTestSource.java @@ -29,6 +29,8 @@ import org.apache.avro.generic.GenericRecord; import org.apache.avro.generic.IndexedRecord; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.apache.spark.api.java.JavaSparkContext; import org.apache.spark.sql.SparkSession; @@ -40,6 +42,8 @@ public abstract class AbstractBaseTestSource extends AvroSource { + private static final Logger LOG = LogManager.getLogger(AbstractBaseTestSource.class); + static final int DEFAULT_PARTITION_NUM = 0; // Static instance, helps with reuse across a test. @@ -56,7 +60,7 @@ public static void initDataGen(TypedProperties props, int partition) { TestSourceConfig.DEFAULT_USE_ROCKSDB_FOR_TEST_DATAGEN_KEYS); String baseStoreDir = props.getString(TestSourceConfig.ROCKSDB_BASE_DIR_FOR_TEST_DATAGEN_KEYS, File.createTempFile("test_data_gen", ".keys").getParent()) + "/" + partition; - log.info("useRocksForTestDataGenKeys=" + useRocksForTestDataGenKeys + ", BaseStoreDir=" + baseStoreDir); + LOG.info("useRocksForTestDataGenKeys=" + useRocksForTestDataGenKeys + ", BaseStoreDir=" + baseStoreDir); dataGeneratorMap.put(partition, new HoodieTestDataGenerator(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS, useRocksForTestDataGenKeys ? new RocksDBBasedMap<>(baseStoreDir) : new HashMap<>())); } catch (IOException e) { @@ -85,11 +89,11 @@ protected static Stream fetchNextBatch(TypedProperties props, int // generate `sourceLimit` number of upserts each time. int numExistingKeys = dataGenerator.getNumExistingKeys(); - log.info("NumExistingKeys=" + numExistingKeys); + LOG.info("NumExistingKeys=" + numExistingKeys); int numUpdates = Math.min(numExistingKeys, sourceLimit / 2); int numInserts = sourceLimit - numUpdates; - log.info("Before adjustments => numInserts=" + numInserts + ", numUpdates=" + numUpdates); + LOG.info("Before adjustments => numInserts=" + numInserts + ", numUpdates=" + numUpdates); if (numInserts + numExistingKeys > maxUniqueKeys) { // Limit inserts so that maxUniqueRecords is maintained @@ -101,9 +105,9 @@ protected static Stream fetchNextBatch(TypedProperties props, int numUpdates = Math.min(numExistingKeys, sourceLimit - numInserts); } - log.info("NumInserts=" + numInserts + ", NumUpdates=" + numUpdates + ", maxUniqueRecords=" + maxUniqueKeys); + LOG.info("NumInserts=" + numInserts + ", NumUpdates=" + numUpdates + ", maxUniqueRecords=" + maxUniqueKeys); long memoryUsage1 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory(); - log.info("Before DataGen. Memory Usage=" + memoryUsage1 + ", Total Memory=" + Runtime.getRuntime().totalMemory() + LOG.info("Before DataGen. 
Memory Usage=" + memoryUsage1 + ", Total Memory=" + Runtime.getRuntime().totalMemory() + ", Free Memory=" + Runtime.getRuntime().freeMemory()); Stream updateStream = dataGenerator.generateUniqueUpdatesStream(commitTime, numUpdates) diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/DistributedTestDataSource.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/DistributedTestDataSource.java index 9e8b3c4e8d28c..94629cb506e4a 100644 --- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/DistributedTestDataSource.java +++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/DistributedTestDataSource.java @@ -24,6 +24,8 @@ import org.apache.hudi.utilities.sources.config.TestSourceConfig; import org.apache.avro.generic.GenericRecord; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.apache.spark.sql.SparkSession; @@ -37,6 +39,8 @@ */ public class DistributedTestDataSource extends AbstractBaseTestSource { + private static final Logger LOG = LogManager.getLogger(DistributedTestDataSource.class); + private final int numTestSourcePartitions; public DistributedTestDataSource(TypedProperties props, JavaSparkContext sparkContext, SparkSession sparkSession, @@ -50,7 +54,7 @@ public DistributedTestDataSource(TypedProperties props, JavaSparkContext sparkCo protected InputBatch> fetchNewData(Option lastCkptStr, long sourceLimit) { int nextCommitNum = lastCkptStr.map(s -> Integer.parseInt(s) + 1).orElse(0); String commitTime = String.format("%05d", nextCommitNum); - log.info("Source Limit is set to " + sourceLimit); + LOG.info("Source Limit is set to " + sourceLimit); // No new data. if (sourceLimit <= 0) { @@ -69,7 +73,7 @@ protected InputBatch> fetchNewData(Option lastCkp JavaRDD avroRDD = sparkContext.parallelize(IntStream.range(0, numTestSourcePartitions).boxed().collect(Collectors.toList()), numTestSourcePartitions).mapPartitionsWithIndex((p, idx) -> { - log.info("Initializing source with newProps=" + newProps); + LOG.info("Initializing source with newProps=" + newProps); if (!dataGeneratorMap.containsKey(p)) { initDataGen(newProps, p); } diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDataSource.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDataSource.java index c6130a6214c7b..0b52db96c4b4e 100644 --- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDataSource.java +++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/sources/TestDataSource.java @@ -37,7 +37,7 @@ */ public class TestDataSource extends AbstractBaseTestSource { - private static volatile Logger log = LogManager.getLogger(TestDataSource.class); + private static final Logger LOG = LogManager.getLogger(TestDataSource.class); public TestDataSource(TypedProperties props, JavaSparkContext sparkContext, SparkSession sparkSession, SchemaProvider schemaProvider) { @@ -50,7 +50,7 @@ protected InputBatch> fetchNewData(Option lastChe int nextCommitNum = lastCheckpointStr.map(s -> Integer.parseInt(s) + 1).orElse(0); String commitTime = String.format("%05d", nextCommitNum); - log.info("Source Limit is set to " + sourceLimit); + LOG.info("Source Limit is set to " + sourceLimit); // No new data. if (sourceLimit <= 0) {
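Note on the logging pattern applied throughout this patch: each handle is converted from a mutable, sometimes volatile or protected/public field named "log" or "logger" into a private static final Logger constant named LOG. The sketch below is illustrative only and is not part of the patch (the class name ExampleService and method doWork are hypothetical); it shows the before/after shape of the declaration using the same log4j 1.x API (org.apache.log4j.LogManager / Logger) that these files already use.

import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class ExampleService {

  // Before (pattern being removed): mutable, non-final field; name and modifiers varied per class.
  // private static volatile Logger log = LogManager.getLogger(ExampleService.class);

  // After (pattern introduced by this patch): one per-class constant, following Java constant naming.
  private static final Logger LOG = LogManager.getLogger(ExampleService.class);

  public void doWork(int numRecords) {
    // Usage is unchanged apart from the field name.
    LOG.info("Processing " + numRecords + " records");

    // Optional guard for verbose levels: skips building the concatenated message
    // when the level is disabled (isDebugEnabled() is standard log4j 1.x API).
    if (LOG.isDebugEnabled()) {
      LOG.debug("Detailed state: numRecords=" + numRecords);
    }
  }
}

Because the field is static and final, each class holds exactly one logger instance resolved at class-load time, which is why the rename is purely mechanical and leaves runtime behavior of the affected classes unchanged.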