diff --git a/hudi-client/pom.xml b/hudi-client/pom.xml
index 66538e0d97d62..d350777cf70c8 100644
--- a/hudi-client/pom.xml
+++ b/hudi-client/pom.xml
@@ -85,11 +85,6 @@
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-      <version>${slf4j.version}</version>
-    </dependency>
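The pom change above drops the slf4j-api dependency; every Java file below then switches its logger from SLF4J to the log4j 1.x API and rewrites `{}`-parameterized messages as plain string concatenation. A minimal, self-contained sketch of the before/after pattern (the class name is illustrative and not part of the patch; the message is taken from the HoodieCleanClient hunk):

```java
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class LoggingMigrationExample {

  // After: a log4j 1.x Logger obtained through LogManager,
  // replacing org.slf4j.LoggerFactory.getLogger(...)
  private static final Logger LOG = LogManager.getLogger(LoggingMigrationExample.class);

  public static void main(String[] args) {
    String cleanInstant = "001";

    // Before (SLF4J): LOG.info("Requesting Cleaning with instant time {}", cleanInstant);
    // After (log4j 1.x): no placeholder support, so the message is concatenated.
    LOG.info("Requesting Cleaning with instant time " + cleanInstant);
  }
}
```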
diff --git a/hudi-client/src/main/java/org/apache/hudi/AbstractHoodieClient.java b/hudi-client/src/main/java/org/apache/hudi/AbstractHoodieClient.java
index 8457b908f39fa..dd108be023b08 100644
--- a/hudi-client/src/main/java/org/apache/hudi/AbstractHoodieClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/AbstractHoodieClient.java
@@ -26,9 +26,9 @@
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaSparkContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.Serializable;
@@ -39,7 +39,7 @@
*/
public abstract class AbstractHoodieClient implements Serializable, AutoCloseable {
- private static final Logger LOG = LoggerFactory.getLogger(AbstractHoodieClient.class);
+ private static final Logger LOG = LogManager.getLogger(AbstractHoodieClient.class);
protected final transient FileSystem fs;
protected final transient JavaSparkContext jsc;
diff --git a/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java b/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java
index 00e0f751ee29c..56a47b73263e6 100644
--- a/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/CompactionAdminClient.java
@@ -45,9 +45,9 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaSparkContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.io.IOException;
@@ -65,7 +65,7 @@
*/
public class CompactionAdminClient extends AbstractHoodieClient {
- private static final Logger LOG = LoggerFactory.getLogger(CompactionAdminClient.class);
+ private static final Logger LOG = LogManager.getLogger(CompactionAdminClient.class);
public CompactionAdminClient(JavaSparkContext jsc, String basePath) {
super(jsc, HoodieWriteConfig.newBuilder().withPath(basePath).build());
@@ -358,14 +358,13 @@ private List runRenamingOps(HoodieTableMetaClient metaClient,
if (!dryRun) {
return jsc.parallelize(renameActions, parallelism).map(lfPair -> {
try {
- LOG.info("RENAME {} => {}", lfPair.getLeft().getPath(), lfPair.getRight().getPath());
+ LOG.info("RENAME " + lfPair.getLeft().getPath() + " => " + lfPair.getRight().getPath());
renameLogFile(metaClient, lfPair.getLeft(), lfPair.getRight());
return new RenameOpResult(lfPair, true, Option.empty());
} catch (IOException e) {
LOG.error("Error renaming log file", e);
- LOG.error("\n\n\n***NOTE Compaction is in inconsistent state. "
- + "Try running \"compaction repair {} \" to recover from failure ***\n\n\n",
- lfPair.getLeft().getBaseCommitTime());
+ LOG.error("\n\n\n***NOTE Compaction is in inconsistent state. Try running \"compaction repair "
+ + lfPair.getLeft().getBaseCommitTime() + "\" to recover from failure ***\n\n\n");
return new RenameOpResult(lfPair, false, Option.of(e));
}
}).collect();
@@ -396,7 +395,7 @@ protected List> getRenamingActionsForUnschedu
HoodieCompactionPlan plan = getCompactionPlan(metaClient, compactionInstant);
if (plan.getOperations() != null) {
LOG.info(
- "Number of Compaction Operations :{} for instant :{}", plan.getOperations().size(), compactionInstant);
+ "Number of Compaction Operations :" + plan.getOperations().size() + " for instant :" + compactionInstant);
List<CompactionOperation> ops = plan.getOperations().stream()
.map(CompactionOperation::convertFromAvroRecordInstance).collect(Collectors.toList());
return jsc.parallelize(ops, parallelism).flatMap(op -> {
@@ -410,7 +409,7 @@ protected List> getRenamingActionsForUnschedu
}
}).collect();
}
- LOG.warn("No operations for compaction instant : {}", compactionInstant);
+ LOG.warn("No operations for compaction instant : " + compactionInstant);
return new ArrayList<>();
}
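One behavioral note on the message rewrites in this file: SLF4J's `{}` placeholders defer building the message until the log level is enabled, whereas the concatenated log4j calls build it eagerly. For messages that are expensive to render, log4j 1.x provides level checks; a hedged sketch of that guard (names are illustrative, not part of the patch):

```java
import java.util.Arrays;
import java.util.List;

import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class GuardedLoggingExample {

  private static final Logger LOG = LogManager.getLogger(GuardedLoggingExample.class);

  public static void main(String[] args) {
    List<String> operations = Arrays.asList("op-1", "op-2");

    // Without a guard, the concatenation (and any toString() it triggers) runs
    // even when INFO is disabled; the check keeps behavior close to SLF4J's lazy placeholders.
    if (LOG.isInfoEnabled()) {
      LOG.info("Number of Compaction Operations :" + operations.size());
    }
  }
}
```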
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
index 68503c67eabce..9411782bc238a 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
@@ -39,16 +39,16 @@
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaSparkContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
public class HoodieCleanClient extends AbstractHoodieClient {
- private static final Logger LOG = LoggerFactory.getLogger(HoodieCleanClient.class);
+ private static final Logger LOG = LogManager.getLogger(HoodieCleanClient.class);
private final transient HoodieMetrics metrics;
public HoodieCleanClient(JavaSparkContext jsc, HoodieWriteConfig clientConfig, HoodieMetrics metrics) {
@@ -85,7 +85,7 @@ protected HoodieCleanMetadata clean(String startCleanTime) throws HoodieIOExcept
// If there are inflight(failed) or previously requested clean operation, first perform them
table.getCleanTimeline().filterInflightsAndRequested().getInstants().forEach(hoodieInstant -> {
- LOG.info("There were previously unfinished cleaner operations. Finishing Instant={}", hoodieInstant);
+ LOG.info("There were previously unfinished cleaner operations. Finishing Instant=" + hoodieInstant);
runClean(table, hoodieInstant);
});
@@ -122,7 +122,7 @@ protected Option scheduleClean(String startCleanTime) {
// Save to both aux and timeline folder
try {
table.getActiveTimeline().saveToCleanRequested(cleanInstant, AvroUtils.serializeCleanerPlan(cleanerPlan));
- LOG.info("Requesting Cleaning with instant time {}", cleanInstant);
+ LOG.info("Requesting Cleaning with instant time " + cleanInstant);
} catch (IOException e) {
LOG.error("Got exception when saving cleaner requested file", e);
throw new HoodieIOException(e.getMessage(), e);
@@ -173,20 +173,20 @@ private HoodieCleanMetadata runClean(HoodieTable table, HoodieInstant cleanIn
Option<Long> durationInMs = Option.empty();
if (context != null) {
durationInMs = Option.of(metrics.getDurationInMs(context.stop()));
- LOG.info("cleanerElaspsedTime (Minutes): {}", durationInMs.get() / (1000 * 60));
+ LOG.info("cleanerElaspsedTime (Minutes): " + durationInMs.get() / (1000 * 60));
}
HoodieTableMetaClient metaClient = createMetaClient(true);
// Create the metadata and save it
HoodieCleanMetadata metadata =
CleanerUtils.convertCleanMetadata(metaClient, cleanInstant.getTimestamp(), durationInMs, cleanStats);
- LOG.info("Cleaned {} files. Earliest Retained : {}", metadata.getTotalFilesDeleted(), metadata.getEarliestCommitToRetain());
+ LOG.info("Cleaned " + metadata.getTotalFilesDeleted() + " files. Earliest Retained :" + metadata.getEarliestCommitToRetain());
metrics.updateCleanMetrics(durationInMs.orElseGet(() -> -1L), metadata.getTotalFilesDeleted());
table.getActiveTimeline().transitionCleanInflightToComplete(
new HoodieInstant(true, HoodieTimeline.CLEAN_ACTION, cleanInstant.getTimestamp()),
AvroUtils.serializeCleanMetadata(metadata));
- LOG.info("Marked clean started on {} as complete", cleanInstant.getTimestamp());
+ LOG.info("Marked clean started on " + cleanInstant.getTimestamp() + " as complete");
return metadata;
} catch (IOException e) {
throw new HoodieIOException("Failed to clean up after commit", e);
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java
index f309f40573af1..3c4290c89020d 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieReadClient.java
@@ -35,6 +35,8 @@
import org.apache.hudi.index.HoodieIndex;
import org.apache.hudi.table.HoodieTable;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
@@ -49,8 +51,6 @@
import java.util.Set;
import java.util.stream.Collectors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import scala.Tuple2;
/**
@@ -58,7 +58,7 @@
*/
public class HoodieReadClient extends AbstractHoodieClient {
- private static final Logger LOG = LoggerFactory.getLogger(HoodieReadClient.class);
+ private static final Logger LOG = LogManager.getLogger(HoodieReadClient.class);
/**
* TODO: We need to persist the index type into hoodie.properties and be able to access the index just with a simple
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
index 09e3f58be7935..efb6d20115ff8 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
@@ -67,6 +67,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
@@ -84,8 +86,6 @@
import java.util.stream.Collectors;
import java.util.stream.IntStream;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import scala.Tuple2;
/**
@@ -96,7 +96,7 @@
*/
public class HoodieWriteClient extends AbstractHoodieClient {
- private static final Logger LOG = LoggerFactory.getLogger(HoodieWriteClient.class);
+ private static final Logger LOG = LogManager.getLogger(HoodieWriteClient.class);
private static final String UPDATE_STR = "update";
private static final String LOOKUP_STR = "lookup";
private final boolean rollbackPending;
@@ -399,13 +399,13 @@ private JavaRDD bulkInsertInternal(JavaRDD> deduped
private void commitOnAutoCommit(String commitTime, JavaRDD resultRDD, String actionType) {
if (config.shouldAutoCommit()) {
- LOG.info("Auto commit enabled: Committing {}", commitTime);
+ LOG.info("Auto commit enabled: Committing " + commitTime);
boolean commitResult = commit(commitTime, resultRDD, Option.empty(), actionType);
if (!commitResult) {
throw new HoodieCommitException("Failed to commit " + commitTime);
}
} else {
- LOG.info("Auto commit disabled for {}", commitTime);
+ LOG.info("Auto commit disabled for " + commitTime);
}
}
@@ -454,13 +454,13 @@ private JavaRDD upsertRecordsInternal(JavaRDD> prep
if (preppedRecords.getStorageLevel() == StorageLevel.NONE()) {
preppedRecords.persist(StorageLevel.MEMORY_AND_DISK_SER());
} else {
- LOG.info("RDD PreppedRecords was persisted at: {}", preppedRecords.getStorageLevel());
+ LOG.info("RDD PreppedRecords was persisted at: " + preppedRecords.getStorageLevel());
}
WorkloadProfile profile = null;
if (hoodieTable.isWorkloadProfileNeeded()) {
profile = new WorkloadProfile(preppedRecords);
- LOG.info("Workload profile : {}", profile);
+ LOG.info("Workload profile :" + profile);
saveWorkloadProfileMetadataToInflight(profile, hoodieTable, commitTime);
}
@@ -526,7 +526,7 @@ public boolean commit(String commitTime, JavaRDD writeStatuses,
private boolean commit(String commitTime, JavaRDD<WriteStatus> writeStatuses,
Option