diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java index 01b97ddd2103..829e25347184 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java @@ -96,7 +96,7 @@ public synchronized void close() { try { scheduledExecutorService.awaitTermination(60, TimeUnit.SECONDS); } catch (InterruptedException e) { - LOG.info("{} interrupted while waiting for task completion {}", + LOG.info("{} interrupted while waiting for task completion.", threadName, e); // Re-interrupt the thread while catching InterruptedException Thread.currentThread().interrupt(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java index cb356dadeb23..12c3c7d220dc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java @@ -180,7 +180,7 @@ public static boolean checkVolume(HddsVolume hddsVolume, String scmId, String try { hddsVolume.format(clusterId); } catch (IOException ex) { - logger.error("Error during formatting volume {}, exception is {}", + logger.error("Error during formatting volume {}.", volumeRoot, ex); return false; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java index 8432e29ddbb8..0babe8154f07 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java @@ -140,7 +140,7 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, containerManager.notifyContainerReportProcessing(true, true); } catch (NodeNotFoundException ex) { containerManager.notifyContainerReportProcessing(true, false); - LOG.error("Received container report from unknown datanode {} {}", + LOG.error("Received container report from unknown datanode {}.", datanodeDetails, ex); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 257667465e18..4afd1cc6750a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -463,7 +463,7 @@ public ContainerInfo getMatchingContainer(final long sizeRequired, return containerInfo; } } catch (Exception e) { - LOG.warn("Container allocation failed for pipeline={} requiredSize={} {}", + LOG.warn("Container allocation failed for pipeline={} requiredSize={}.", pipeline, sizeRequired, e); return null; } @@ -519,7 +519,7 @@ private NavigableSet getContainersForOwner( containerIDIterator.remove(); } } catch (ContainerNotFoundException e) { - LOG.error("Could not find container info for container id={} {}", cid, + LOG.error("Could not find container info for container id={}.", cid, e); containerIDIterator.remove(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java index 591acbc3b154..f240293b8ae6 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java @@ -125,7 +125,7 @@ private void createPipelines() { try { pipelineManager.scrubPipeline(type, factor); } catch (IOException e) { - LOG.error("Error while scrubbing pipelines {}", e); + LOG.error("Error while scrubbing pipelines.", e); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index f45b3a9120b5..9b563efce1ad 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -83,7 +83,7 @@ public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, try { processPipelineReport(report, dn, publisher); } catch (IOException e) { - LOGGER.error("Could not process pipeline report={} from dn={} {}", + LOGGER.error("Could not process pipeline report={} from dn={}.", report, dn, e); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 4349d7c185ae..68466f4544dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -562,7 +562,7 @@ public List getAcl(OzoneObj obj) throws IOException { return bucketInfo.getAcls(); } catch (IOException ex) { if (!(ex instanceof OMException)) { - LOG.error("Get acl operation failed for bucket:{}/{} acl:{}", + LOG.error("Get acl operation failed for bucket:{}/{}.", volume, bucket, ex); } 
throw ex; @@ -607,7 +607,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) if(ex instanceof OMException) { throw (OMException) ex; } - LOG.error("CheckAccess operation failed for bucket:{}/{} acl:{}", + LOG.error("CheckAccess operation failed for bucket:{}/{}.", volume, bucket, ex); throw new OMException("Check access operation failed for " + "bucket:" + bucket, ex, INTERNAL_ERROR); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index aff8a14e2710..da7e98515a99 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -263,7 +263,8 @@ public void start(OzoneConfiguration configuration) throws IOException { // the OM process should be terminated. File markerFile = new File(metaDir, DB_TRANSIENT_MARKER); if (markerFile.exists()) { - LOG.error("File {} marks that OM DB is in an inconsistent state."); + LOG.error("File {} marks that OM DB is in an inconsistent state.", + markerFile); // Note - The marker file should be deleted only after fixing the DB. // In an HA setup, this can be done by replacing this DB with a // checkpoint from another OM. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 3ae843388296..dd526c691b81 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -3124,7 +3124,7 @@ public TermIndex installSnapshotFromLeader(String leaderId) { try { termIndex = installCheckpoint(leaderId, omDBCheckpoint); } catch (Exception ex) { - LOG.error("Failed to install snapshot from Leader OM: {}", ex); + LOG.error("Failed to install snapshot from Leader OM.", ex); } return termIndex; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 3226f7817797..e2a7702db7cb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -329,7 +329,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, bucketName, keyName); break; case FAILURE: - LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}. Exception:{}", + LOG.error("File create failed. 
Volume:{}, Bucket:{}, Key:{}.", volumeName, bucketName, keyName, exception); break; default: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 8ee3f17618d7..d9932ef77528 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -229,7 +229,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, bucketName, keyName); break; case FAILURE: - LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}. Exception:{}", + LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, bucketName, keyName, exception); omMetrics.incNumKeyCommitFails(); break; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 52b1bb15d36f..374a92ffd69c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -600,7 +600,7 @@ public void run() { } } catch (InterruptedException ie) { - LOG.error("ExpiredTokenRemover received {}", ie); + LOG.info("ExpiredTokenRemover was interrupted.", ie); Thread.currentThread().interrupt(); } catch (Exception t) { LOG.error("ExpiredTokenRemover thread received unexpected exception", diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java index 883f90a20617..253e37d75abe 
100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconSchemaManager.java @@ -49,7 +49,7 @@ public void createReconSchema() { try { reconSchemaDefinition.initializeSchema(); } catch (SQLException e) { - LOG.error("Error creating Recon schema {} : {}", + LOG.error("Error creating Recon schema {}.", reconSchemaDefinition.getClass().getSimpleName(), e); } }); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java index 9e8887213f7c..f1cc78e0ce22 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java @@ -86,7 +86,7 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, LOG.warn("Container {} not found!", replicaProto.getContainerID()); } catch (NodeNotFoundException ex) { success = false; - LOG.error("Received ICR from unknown datanode {} {}", + LOG.error("Received ICR from unknown datanode {}.", report.getDatanodeDetails(), ex); } catch (IOException e) { success = false;