diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index ed9ec6271ec..3b774c6dfdf 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.Status;
 import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
@@ -571,7 +572,7 @@ public XceiverClientReply sendCommandAsync(
     final CompletableFuture replyFuture = new CompletableFuture<>();
     semaphore.acquire();
-    long requestTime = System.currentTimeMillis();
+    long requestTime = Time.monotonicNow();
     metrics.incrPendingContainerOpsMetrics(request.getCmdType());
 
     // create a new grpc message stream pair for each call.
@@ -601,7 +602,7 @@ public void onCompleted() {
   private void decreasePendingMetricsAndReleaseSemaphore() {
     metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-    long cost = System.currentTimeMillis() - requestTime;
+    long cost = Time.monotonicNow() - requestTime;
     metrics.addContainerOpsLatency(request.getCmdType(), cost);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Executed command {} on datanode {}, cost = {}, cmdType = {}",
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index fa2bdee0cfe..f185c3e8557 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.client.api.DataStreamApi;
 import org.apache.ratis.grpc.GrpcTlsConfig;
@@ -363,7 +364,7 @@ private XceiverClientReply handleFailedAllCommit(long index, Collection
-    long requestTime = System.currentTimeMillis();
+    long requestTime = Time.monotonicNow();
     CompletableFuture<RaftClientReply> raftClientReply = sendRequestAsync(request);
     metrics.incrPendingContainerOpsMetrics(request.getCmdType());
@@ -376,7 +377,7 @@ public XceiverClientReply sendCommandAsync(
       }
       metrics.decrPendingContainerOpsMetrics(request.getCmdType());
       metrics.addContainerOpsLatency(request.getCmdType(),
-          System.currentTimeMillis() - requestTime);
+          Time.monotonicNow() - requestTime);
     }).thenApply(reply -> {
       try {
         if (!reply.isSuccess()) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
index e533b23fd54..1ce378c62e1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,9 +96,9 @@ public void run() {
         LOG.info("Shutdown process invoked a second time: ignoring");
         return;
       }
-      long started = System.currentTimeMillis();
+      long started = Time.monotonicNow();
       int timeoutCount = MGR.executeShutdown();
-      long ended = System.currentTimeMillis();
+      long ended = Time.monotonicNow();
       LOG.debug(String.format(
           "Completed shutdown in %.3f seconds; Timeouts: %d",
           (ended - started) / 1000.0, timeoutCount));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 6b4ca22ceb0..6ea9e87541e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 
 /**
@@ -89,7 +90,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
     List<CompletableFuture<Void>> futures = new ArrayList<>();
     List<HddsVolume> hddsVolumes = StorageVolumeUtil.getHddsVolumesList(
         hddsVolumeSet.getVolumesList());
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
     for (HddsVolume volume : hddsVolumes) {
       futures.add(CompletableFuture.runAsync(
           () -> loadVolume(volume, readOnly, logger)));
@@ -99,7 +100,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
     }
     if (logger != null) {
       logger.info("Load {} volumes DbStore cost: {}ms", hddsVolumes.size(),
-          System.currentTimeMillis() - start);
+          Time.monotonicNow() - start);
     }
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 750bb6e2832..c181f283e4a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -95,6 +95,7 @@
 import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.slf4j.Logger;
@@ -315,7 +316,7 @@ public void buildContainerSet() throws IOException {
     Iterator volumeSetIterator = volumeSet.getVolumesList()
         .iterator();
     ArrayList volumeThreads = new ArrayList<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     // Load container inspectors that may be triggered at startup based on
     // system properties set. These can inspect and possibly repair
@@ -357,7 +358,7 @@ public void buildContainerSet() throws IOException {
     ContainerInspectorUtil.unload();
 
     LOG.info("Build ContainerSet costs {}s",
-        (System.currentTimeMillis() - startTime) / 1000);
+        (Time.monotonicNow() - startTime) / 1000);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index be274562df7..d986758d123 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -70,6 +70,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -464,7 +465,7 @@ public void testDeleteContainerTimeout() throws IOException {
     final ContainerSet containerSet = newContainerSet();
     final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
     final Clock clock = mock(Clock.class);
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
 
     when(clock.millis())
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index e1ca0866e3a..4688c46214f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -68,6 +68,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.apache.ratis.util.FileUtils;
 import org.junit.jupiter.api.AfterEach;
@@ -450,7 +451,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
           (HddsVolume) volumes.get(i), containerSet, conf, true);
       threads[i] = new Thread(containerReaders[i]);
     }
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     for (int i = 0; i < volumeNum; i++) {
       threads[i].start();
     }
@@ -458,7 +459,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
       threads[i].join();
     }
     System.out.println("Open " + volumeNum + " Volume with " + containerCount +
-        " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
+        " costs " + (Time.monotonicNow() - startTime) / 1000 + "s");
     assertEquals(containerCount,
         containerSet.getContainerMap().entrySet().size());
     assertEquals(volumeSet.getFailedVolumesList().size(), 0);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
index f2e6ba87cdd..88620156a54 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java
@@ -28,6 +28,7 @@
 import java.util.UUID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;
 
 /**
@@ -104,7 +105,7 @@ public void test() throws InterruptedException {
     ReplicationSupervisor rs = ReplicationSupervisor.newBuilder().build();
 
-    final long start = System.currentTimeMillis();
+    final long start = Time.monotonicNow();
 
     //schedule 100 container replication
     for (int i = 0; i < 100; i++) {
@@ -114,7 +115,7 @@ public void test() throws InterruptedException {
       rs.addTask(new ReplicationTask(fromSources(i, sources), replicator));
     }
     rs.shutdownAfterFinish();
-    final long executionTime = System.currentTimeMillis() - start;
+    final long executionTime = Time.monotonicNow() - start;
     System.out.println(executionTime);
     assertThat(executionTime)
         .withFailMessage("Execution was too slow : " + executionTime + " ms")
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index 2b3e13e3540..6f48181dcce 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.lease.LeaseExpiredException;
 import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -116,7 +117,7 @@ private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
       EventPublisher publisher) {
     metrics.incrementTrackedEvents();
     long identifier = payload.getId();
-    startTrackingTimes.put(identifier, System.currentTimeMillis());
+    startTrackingTimes.put(identifier, Time.monotonicNow());
     trackedEventsByID.put(identifier, payload);
     trackedEvents.add(payload);
@@ -139,7 +140,7 @@ protected synchronized void handleCompletion(COMPLETION_PAYLOAD
     if (trackedEvents.remove(payload)) {
       metrics.incrementCompletedEvents();
       long originalTime = startTrackingTimes.remove(id);
-      metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
+      metrics.updateFinishingTime(Time.monotonicNow() - originalTime);
       onFinished(publisher, payload);
     }
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java
index f772ea8c921..09e154f9aab 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.util.UncheckedAutoCloseable;
 
 /**
@@ -68,12 +69,12 @@ public void increment(KEY key, long duration) {
   }
 
   public UncheckedAutoCloseable measure(KEY key) {
-    final long startTime = System.currentTimeMillis();
+    final long startTime = Time.monotonicNow();
     concurrency.incrementAndGet();
     return () -> {
       concurrency.decrementAndGet();
       counters.get(key).incrementAndGet();
-      elapsedTimes.get(key).addAndGet(System.currentTimeMillis() - startTime);
+      elapsedTimes.get(key).addAndGet(Time.monotonicNow() - startTime);
     };
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index 5c5247e011a..9ce8e03f69a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -297,7 +298,7 @@ private List getRangeKVs(byte[] startKey,
       int count, boolean sequential, byte[] prefix,
       MetadataKeyFilters.MetadataKeyFilter... filters)
       throws IOException, IllegalArgumentException {
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
 
     if (count < 0) {
       throw new IllegalArgumentException(
@@ -342,7 +343,7 @@ && get(startKey) == null) {
         }
       }
     } finally {
-      long end = System.currentTimeMillis();
+      long end = Time.monotonicNow();
       long timeConsumed = end - start;
       if (LOG.isDebugEnabled()) {
         if (filters != null) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
index 504bda74575..627210ca9de 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalization.Status;
 import org.apache.hadoop.ozone.upgrade.UpgradeFinalization.StatusAndMessages;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.exceptions.NotLeaderException;
 
 /**
@@ -169,9 +170,9 @@ public void finalizeAndWaitForCompletion(
     }
 
     boolean success = false;
-    long endTime = System.currentTimeMillis() +
+    long endTime = Time.monotonicNow() +
         TimeUnit.SECONDS.toMillis(maxTimeToWaitInSeconds);
-    while (System.currentTimeMillis() < endTime) {
+    while (Time.monotonicNow() < endTime) {
       try {
         response = reportStatus(upgradeClientID, false);
         LOG.info("Finalization Messages : {} ", response.msgs());
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index bae0fcd5363..c09c5af85d0 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -89,6 +89,7 @@
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
 import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.compaction.log.CompactionFileInfo;
 import org.apache.ozone.compaction.log.CompactionLogEntry;
 import org.apache.ozone.rocksdb.util.RdbUtil;
@@ -758,7 +759,7 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ)
   private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException {
     LOG.trace("Current time: " + System.currentTimeMillis());
-    long t1 = System.currentTimeMillis();
+    long t1 = Time.monotonicNow();
 
     final long snapshotGeneration = rocksDB.get().getLatestSequenceNumber();
     final String cpPath = CP_PATH_PREFIX + snapshotGeneration;
@@ -780,7 +781,7 @@ private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException {
         colHandle));
     this.snapshots.add(currentSnapshot);
 
-    long t2 = System.currentTimeMillis();
+    long t2 = Time.monotonicNow();
     LOG.trace("Current time: " + t2);
     LOG.debug("Time elapsed: " + (t2 - t1) + " ms");
   }
diff --git a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
index da5a08f615d..b3529ff10b5 100644
--- a/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
+++ b/hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeoutException;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -105,7 +106,7 @@ public static int await(int timeoutMillis,
         "timeoutMillis must be >= 0");
     Preconditions.checkNotNull(timeoutHandler);
 
-    final long endTime = System.currentTimeMillis() + timeoutMillis;
+    final long endTime = Time.monotonicNow() + timeoutMillis;
     Throwable ex = null;
     boolean running = true;
     int iterations = 0;
@@ -126,7 +127,7 @@ public static int await(int timeoutMillis,
         LOG.debug("await() iteration {}", iterations, e);
         ex = e;
       }
-      running = System.currentTimeMillis() < endTime;
+      running = Time.monotonicNow() < endTime;
       if (running) {
         int sleeptime = retry.call();
         if (sleeptime >= 0) {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
index bb225aba589..9690bbf8d45 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java
@@ -21,6 +21,7 @@
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.util.Time;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Mixin;
 import picocli.CommandLine.Option;
@@ -48,7 +49,7 @@ public class SafeModeWaitSubcommand implements Callable {
 
   @Override
   public Void call() throws Exception {
-    startTestTime = System.currentTimeMillis();
+    startTestTime = Time.monotonicNow();
 
     while (getRemainingTimeInSec() > 0) {
       try (ScmClient scmClient = scmOption.createScmClient()) {
@@ -85,6 +86,6 @@ public Void call() throws Exception {
   }
 
   private long getRemainingTimeInSec() {
-    return timeoutSeconds - (System.currentTimeMillis() -
-        startTestTime) / 1000;
+    return timeoutSeconds - (Time.monotonicNow() - startTestTime) / 1000;
   }
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
index ae9ec43661a..6b2f7818f8c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -48,7 +49,7 @@ public List run(OzoneConfiguration configuration,
       List volumes) throws IOException {
     List results = new ArrayList<>();
     Map<StorageVolume, CompletableFuture<Result>> volumeFutures = new HashMap<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     LOG.info("Start to upgrade {} volume(s)", volumes.size());
     for (StorageVolume volume : volumes) {
@@ -76,7 +77,7 @@ public List run(OzoneConfiguration configuration,
     }
 
     LOG.info("It took {}ms to finish all volume upgrade.",
-        (System.currentTimeMillis() - startTime));
+        (Time.monotonicNow() - startTime));
     return results;
   }
@@ -91,7 +92,7 @@ public DatanodeStoreSchemaThreeImpl getDBStore(HddsVolume volume) {
   public static class Result {
     private Map resultMap;
     private final HddsVolume hddsVolume;
-    private final long startTimeMs = System.currentTimeMillis();
+    private final long startTimeMs = Time.monotonicNow();
     private long endTimeMs = 0L;
     private Exception e = null;
     private Status status = Status.FAIL;
@@ -124,12 +125,12 @@ public boolean isSuccess() {
     }
 
     public void success() {
-      this.endTimeMs = System.currentTimeMillis();
+      this.endTimeMs = Time.monotonicNow();
       this.status = Status.SUCCESS;
     }
 
     public void fail(Exception exception) {
-      this.endTimeMs = System.currentTimeMillis();
+      this.endTimeMs = Time.monotonicNow();
       this.status = Status.FAIL;
       this.e = exception;
     }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
index aff031a090e..262abc57fd2 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -383,7 +384,7 @@ public static class UpgradeContainerResult {
     private final ContainerData originContainerData;
     private ContainerData newContainerData;
     private long totalRow = 0L;
-    private final long startTimeMs = System.currentTimeMillis();
+    private final long startTimeMs = Time.monotonicNow();
     private long endTimeMs = 0L;
     private Status status;
@@ -431,7 +432,7 @@ public void setNewContainerFilePath(String newContainerFilePath) {
 
     public void success(long rowCount) {
       this.totalRow = rowCount;
-      this.endTimeMs = System.currentTimeMillis();
+      this.endTimeMs = Time.monotonicNow();
       this.status = Status.SUCCESS;
     }
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
index a8edd3908d6..c4dfc28df47 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.ozone.lib.server.BaseService;
 import org.apache.ozone.lib.server.ServiceException;
@@ -112,7 +113,7 @@ synchronized void release() throws IOException {
         fs = null;
         lastUse = -1;
       } else {
-        lastUse = System.currentTimeMillis();
+        lastUse = Time.monotonicNow();
       }
     }
   }
@@ -125,7 +126,7 @@ synchronized void release() throws IOException {
     synchronized boolean purgeIfIdle() throws IOException {
       boolean ret = false;
       if (count == 0 && lastUse != -1 &&
-          (System.currentTimeMillis() - lastUse) > timeout) {
+          (Time.monotonicNow() - lastUse) > timeout) {
         fs.close();
         fs = null;
         lastUse = -1;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index f0e48126154..a4d7c02b4a4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.functional.FutureIO;
 import org.apache.hadoop.util.functional.RemoteIterators;
 import org.assertj.core.api.Assertions;
@@ -1498,13 +1499,13 @@ public static boolean containsDuplicates(Collection paths) {
    */
   public static FileStatus getFileStatusEventually(FileSystem fs, Path path,
       int timeout) throws IOException, InterruptedException {
-    long endTime = System.currentTimeMillis() + timeout;
+    long endTime = Time.monotonicNow() + timeout;
     FileStatus stat = null;
     do {
       try {
         stat = fs.getFileStatus(path);
       } catch (FileNotFoundException e) {
-        if (System.currentTimeMillis() > endTime) {
+        if (Time.monotonicNow() > endTime) {
           // timeout, raise an assert with more diagnostics
           assertPathExists(fs, "Path not found after " + timeout + " mS", path);
         } else {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index fe56a23a665..1acb1bd90c7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -93,6 +93,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.apache.ozone.test.tag.Flaky;
@@ -821,7 +822,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception {
     // Wait for block delete command sent from OM
     OzoneTestUtils.flushAndWaitForDeletedBlockLog(scm);
 
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
     // Wait for all blocks been deleted.
     GenericTestUtils.waitFor(() -> {
       try {
@@ -833,7 +834,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception {
       }
       return false;
     }, 100, 30000);
-    long end = System.currentTimeMillis();
+    long end = Time.monotonicNow();
     System.out.println("Block deletion costs " + (end - start) + "ms");
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
index a3ad7c6a48e..a837cb78510 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.freon.TestOmBucketReadWriteFileOps.ParameterBuilder;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.lock.OMLockMetrics;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.NonHATests;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
@@ -115,7 +116,7 @@ void testOmBucketReadWriteKeyOps(ParameterBuilder parameterBuilder) throws Excep
         parameterBuilder.getBucketArgs().setBucketLayout(BucketLayout.OBJECT_STORE).build()
     );
 
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     String om = cluster().getConf().get(OZONE_OM_ADDRESS_KEY);
     new Freon().getCmd().execute(
         "-D", OZONE_OM_ADDRESS_KEY + "=" + om,
@@ -132,7 +133,7 @@ void testOmBucketReadWriteKeyOps(ParameterBuilder parameterBuilder) throws Excep
         "-R", String.valueOf(parameterBuilder.getNumOfReadOperations()),
         "-W", String.valueOf(parameterBuilder.getNumOfWriteOperations()),
         "-n", String.valueOf(1));
-    long totalTime = System.currentTimeMillis() - startTime;
+    long totalTime = Time.monotonicNow() - startTime;
     LOG.info("Total Execution Time: " + totalTime);
 
     LOG.info("Started verifying OM bucket read/write ops key generation...");
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
index 8c6cdd4a748..35ae4a60fec 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.ozone.om.helpers.OMNodeDetails;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -188,17 +189,17 @@ public static void downloadFileWithProgress(InputStream inputStream, File target
     byte[] buffer = new byte[8 * 1024];
     long totalBytesRead = 0;
     int bytesRead;
-    long lastLoggedTime = System.currentTimeMillis();
+    long lastLoggedTime = Time.monotonicNow();
 
     while ((bytesRead = inputStream.read(buffer)) != -1) {
       outputStream.write(buffer, 0, bytesRead);
       totalBytesRead += bytesRead;
 
       // Log progress every 30 seconds
-      if (System.currentTimeMillis() - lastLoggedTime >= 30000) {
+      if (Time.monotonicNow() - lastLoggedTime >= 30000) {
         LOG.info("Downloading '{}': {} KB downloaded so far...",
             targetFile.getName(), totalBytesRead / (1024));
-        lastLoggedTime = System.currentTimeMillis();
+        lastLoggedTime = Time.monotonicNow();
       }
     }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
index 05bb957f22c..ef5ee2a4ea4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.raftlog.RaftLog;
 import org.apache.ratis.statemachine.StateMachine;
@@ -173,7 +174,7 @@ private static long waitForLogIndex(long minOMDBFlushIndex,
       Duration flushTimeout, Duration flushCheckInterval)
       throws InterruptedException, IOException {
 
-    long endTime = System.currentTimeMillis() + flushTimeout.toMillis();
+    long endTime = Time.monotonicNow() + flushTimeout.toMillis();
 
     boolean omDBFlushed = false;
     boolean ratisStateMachineApplied = false;
@@ -193,7 +194,7 @@ private static long waitForLogIndex(long minOMDBFlushIndex,
         " to Ratis state machine.",
         om.getOMNodeId(), minOMDBFlushIndex, minRatisStateMachineIndex);
     while (!(omDBFlushed && ratisStateMachineApplied) &&
-        System.currentTimeMillis() < endTime) {
+        Time.monotonicNow() < endTime) {
       // Check OM DB.
       lastOMDBFlushIndex = om.getRatisSnapshotIndex();
       omDBFlushed = (lastOMDBFlushIndex >= minOMDBFlushIndex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
index 35ffca10095..b35a95a2e89 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
@@ -57,6 +57,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -346,7 +347,7 @@ private void recalculateUsages(
           prefixUsageMap, q, isRunning, haveValue)));
     }
     int count = 0;
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
         keyIter = table.iterator()) {
       while (keyIter.hasNext()) {
@@ -363,7 +364,7 @@ private void recalculateUsages(
         f.get();
       }
       LOG.info("Recalculate {} completed, count {} time {}ms", strType,
-          count, (System.currentTimeMillis() - startTime));
+          count, (Time.monotonicNow() - startTime));
     } catch (IOException ex) {
       throw new UncheckedIOException(ex);
     } catch (InterruptedException ex) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index a590b834d03..ba2ebe0a779 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -83,6 +83,7 @@
 import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
 import org.jooq.Configuration;
@@ -393,12 +394,12 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu
     });
 
     executor.submit(() -> {
-      long startTime = System.currentTimeMillis();
+      long startTime = Time.monotonicNow();
       log.info("Rebuilding NSSummary tree...");
       try {
         reconNamespaceSummaryManager.rebuildNSSummaryTree(omMetadataManager);
       } finally {
-        long endTime = System.currentTimeMillis();
+        long endTime = Time.monotonicNow();
         log.info("NSSummary tree rebuild completed in {} ms.", endTime - startTime);
       }
     });
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java
index 7e5a02ff99a..e786aa282a8 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperHelper.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
 import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -161,7 +162,7 @@ public static boolean process(OMUpdateEventBatch events,
     Map containerKeyCountMap = new HashMap<>();
     // List of the deleted (container, key) pair's
     List deletedKeyCountList = new ArrayList<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
 
     while (eventIterator.hasNext()) {
       OMDBUpdateEvent omdbUpdateEvent = eventIterator.next();
@@ -211,7 +212,7 @@ public static boolean process(OMUpdateEventBatch events,
       return false;
     }
     LOG.debug("{} successfully processed {} OM DB update event(s) in {} milliseconds.",
-        taskName, eventCount, (System.currentTimeMillis() - startTime));
+        taskName, eventCount, (Time.monotonicNow() - startTime));
     return true;
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java
index f6d26373f00..b8f5302c625 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTaskHelper.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.recon.ReconConstants;
 import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.recon.schema.generated.tables.daos.FileCountBySizeDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.FileCountBySize;
 import org.jooq.DSLContext;
@@ -90,7 +91,7 @@ public static ReconOmTask.TaskResult reprocess(OMMetadataManager omMetadataManag
       String taskName) {
     LOG.info("Starting Reprocess for {}", taskName);
     Map fileSizeCountMap = new HashMap<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     truncateTableIfNeeded(dslContext);
     boolean status = reprocessBucketLayout(
         bucketLayout, omMetadataManager, fileSizeCountMap, dslContext, fileCountBySizeDao, taskName);
@@ -98,7 +99,7 @@ public static ReconOmTask.TaskResult reprocess(OMMetadataManager omMetadataManag
       return buildTaskResult(taskName, false);
     }
     writeCountsToDB(fileSizeCountMap, dslContext, fileCountBySizeDao);
-    long endTime = System.currentTimeMillis();
+    long endTime = Time.monotonicNow();
     LOG.info("{} completed Reprocess in {} ms.", taskName, (endTime - startTime));
     return buildTaskResult(taskName, true);
   }
@@ -160,7 +161,7 @@ public static ReconOmTask.TaskResult processEvents(OMUpdateEventBatch events,
       String taskName) {
     Iterator eventIterator = events.getIterator();
     Map fileSizeCountMap = new HashMap<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     while (eventIterator.hasNext()) {
       OMDBUpdateEvent omdbUpdateEvent = eventIterator.next();
       if (!tableName.equals(omdbUpdateEvent.getTable())) {
@@ -202,7 +203,7 @@ public static ReconOmTask.TaskResult processEvents(OMUpdateEventBatch events,
     }
     writeCountsToDB(fileSizeCountMap, dslContext, fileCountBySizeDao);
     LOG.debug("{} successfully processed in {} milliseconds", taskName,
-        (System.currentTimeMillis() - startTime));
+        (Time.monotonicNow() - startTime));
     return buildTaskResult(taskName, true);
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
index 7fe9584fdf5..34fd1af6d69 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.recon.schema.generated.tables.daos.GlobalStatsDao;
 import org.apache.ozone.recon.schema.generated.tables.pojos.GlobalStats;
 import org.jooq.Configuration;
@@ -172,7 +173,7 @@ public TaskResult process(OMUpdateEventBatch events,
     String tableName;
     OMDBUpdateEvent omdbUpdateEvent;
     // Process each update event
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     while (eventIterator.hasNext()) {
       omdbUpdateEvent = eventIterator.next();
       tableName = omdbUpdateEvent.getTable();
@@ -215,7 +216,7 @@ public TaskResult process(OMUpdateEventBatch events,
       writeDataToDB(replicatedSizeMap);
     }
     LOG.debug("{} successfully processed in {} milliseconds",
-        getTaskName(), (System.currentTimeMillis() - startTime));
+        getTaskName(), (Time.monotonicNow() - startTime));
     return buildTaskResult(true);
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java
index 4641025c189..fd6b2ec0a41 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/PrepareSubCommand.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse.PrepareStatus;
+import org.apache.hadoop.util.Time;
 import picocli.CommandLine;
 
 /**
@@ -112,10 +113,10 @@ public Void call() throws Exception {
     System.out.println();
     System.out.println("Checking individual OM instances for prepare request " +
         "completion...");
-    long endTime = System.currentTimeMillis() + pTimeout.toMillis();
+    long endTime = Time.monotonicNow() + pTimeout.toMillis();
     int expectedNumPreparedOms = omPreparedStatusMap.size();
     int currentNumPreparedOms = 0;
-    while (System.currentTimeMillis() < endTime &&
+    while (Time.monotonicNow() < endTime &&
         currentNumPreparedOms < expectedNumPreparedOms) {
       for (Map.Entry e : omPreparedStatusMap.entrySet()) {
         if (!e.getValue()) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index eb9be2c292e..9face29009a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.ozone.util.ShutdownHookManager;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.ClientId;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -180,7 +181,7 @@ private void taskLoop(TaskProvider provider) {
     while (!completed.get()) {
       long counter = attemptCounter.getAndIncrement();
       if (timebase) {
-        if (System.currentTimeMillis()
+        if (Time.monotonicNow()
             > startTime + TimeUnit.SECONDS.toMillis(durationInSecond)) {
           completed.set(true);
           break;
@@ -339,7 +340,7 @@ public void init() {
         freonCommand.isInteractive(), realTimeStatusSupplier());
     progressBar.start();
 
-    startTime = System.currentTimeMillis();
+    startTime = Time.monotonicNow();
   }
 
   public Supplier realTimeStatusSupplier() {
@@ -371,7 +372,7 @@ public void printReport() {
     List messages = new LinkedList<>();
     messages.add("Total execution time (sec): " +
-        Math.round((System.currentTimeMillis() - startTime) / 1000.0));
+        Math.round((Time.monotonicNow() - startTime) / 1000.0));
     messages.add("Failures: " + failureCounter.get());
     messages.add("Successful executions: " + successCounter.get());
    if (failureCounter.get() > 0) {
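
Reviewer note on the pattern, for context: org.apache.hadoop.util.Time.monotonicNow() returns milliseconds derived from System.nanoTime(), a clock that never moves backwards, while System.currentTimeMillis() tracks the wall clock and can jump in either direction when NTP or an operator adjusts the system time. Every hunk in this patch is an elapsed-time or deadline computation, which is exactly where such a jump produces negative or inflated durations. A minimal sketch of the difference, assuming only hadoop-common on the classpath (the class and variable names below are illustrative, not from the patch):

    import org.apache.hadoop.util.Time;

    public class MonotonicElapsedSketch {
      public static void main(String[] args) throws InterruptedException {
        // Wall clock: an absolute timestamp; the OS may adjust it mid-measurement.
        long wallStart = System.currentTimeMillis();
        // Monotonic clock: only differences are meaningful; never decreases.
        long monoStart = Time.monotonicNow();

        Thread.sleep(100); // stand-in for the operation being timed

        // Can be negative or wildly off if the system clock was corrected meanwhile.
        System.out.println("wall elapsed = " + (System.currentTimeMillis() - wallStart) + " ms");
        // Always a sane, non-negative duration.
        System.out.println("mono elapsed = " + (Time.monotonicNow() - monoStart) + " ms");
      }
    }

The trade-off the patch respects: monotonic values are not epoch timestamps, so they should only ever be compared or subtracted, never logged or stored as absolute times.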