
Commit 7dfd8c1

HDDS-12060. Replace System.currentTimeMillis() with Time.monotonicNow() for duration calculation (apache#8096)
1 parent a21e362 commit 7dfd8c1

30 files changed: +96 / -66 lines
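
The same pattern is applied in every one of the 30 files: wherever two wall-clock readings were subtracted to compute a duration, System.currentTimeMillis() is replaced with Hadoop's monotonic clock, org.apache.hadoop.util.Time.monotonicNow(), which is derived from System.nanoTime() and therefore does not jump when NTP or an operator adjusts the system clock. Below is a minimal before/after sketch of the pattern; the class and method names are illustrative only and do not appear in the patch.

import org.apache.hadoop.util.Time;

// Illustrative sketch only, not code from this commit.
public final class DurationMeasurementSketch {

  // Before: wall-clock timestamps. A clock adjustment between the two reads
  // skews the measured duration and can even make it negative.
  static long elapsedWallClockMs(Runnable work) {
    long start = System.currentTimeMillis();
    work.run();
    return System.currentTimeMillis() - start;
  }

  // After: monotonic timestamps. Only meaningful as a difference between two
  // readings in the same JVM, never as an absolute point in time.
  static long elapsedMonotonicMs(Runnable work) {
    long start = Time.monotonicNow();
    work.run();
    return Time.monotonicNow() - start;
  }
}
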

hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java

Lines changed: 3 additions & 2 deletions
@@ -60,6 +60,7 @@
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
 import org.apache.ratis.thirdparty.io.grpc.Status;
 import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
@@ -571,7 +572,7 @@ public XceiverClientReply sendCommandAsync(
     final CompletableFuture<ContainerCommandResponseProto> replyFuture =
         new CompletableFuture<>();
     semaphore.acquire();
-    long requestTime = System.currentTimeMillis();
+    long requestTime = Time.monotonicNow();
     metrics.incrPendingContainerOpsMetrics(request.getCmdType());

     // create a new grpc message stream pair for each call.
@@ -601,7 +602,7 @@ public void onCompleted() {

   private void decreasePendingMetricsAndReleaseSemaphore() {
     metrics.decrPendingContainerOpsMetrics(request.getCmdType());
-    long cost = System.currentTimeMillis() - requestTime;
+    long cost = Time.monotonicNow() - requestTime;
     metrics.addContainerOpsLatency(request.getCmdType(), cost);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Executed command {} on datanode {}, cost = {}, cmdType = {}",

hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java

Lines changed: 3 additions & 2 deletions
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.client.api.DataStreamApi;
 import org.apache.ratis.grpc.GrpcTlsConfig;
@@ -363,7 +364,7 @@ private XceiverClientReply handleFailedAllCommit(long index, Collection<CommitIn
   public XceiverClientReply sendCommandAsync(
       ContainerCommandRequestProto request) {
     XceiverClientReply asyncReply = new XceiverClientReply(null);
-    long requestTime = System.currentTimeMillis();
+    long requestTime = Time.monotonicNow();
     CompletableFuture<RaftClientReply> raftClientReply =
         sendRequestAsync(request);
     metrics.incrPendingContainerOpsMetrics(request.getCmdType());
@@ -376,7 +377,7 @@ public XceiverClientReply sendCommandAsync(
       }
       metrics.decrPendingContainerOpsMetrics(request.getCmdType());
       metrics.addContainerOpsLatency(request.getCmdType(),
-          System.currentTimeMillis() - requestTime);
+          Time.monotonicNow() - requestTime);
     }).thenApply(reply -> {
       try {
         if (!reply.isSuccess()) {

hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java

Lines changed: 3 additions & 2 deletions
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -95,9 +96,9 @@ public void run() {
       LOG.info("Shutdown process invoked a second time: ignoring");
       return;
     }
-    long started = System.currentTimeMillis();
+    long started = Time.monotonicNow();
     int timeoutCount = MGR.executeShutdown();
-    long ended = System.currentTimeMillis();
+    long ended = Time.monotonicNow();
     LOG.debug(String.format(
         "Completed shutdown in %.3f seconds; Timeouts: %d",
         (ended - started) / 1000.0, timeoutCount));

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java

Lines changed: 3 additions & 2 deletions
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;

 /**
@@ -89,7 +90,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
     List<CompletableFuture<Void>> futures = new ArrayList<>();
     List<HddsVolume> hddsVolumes = StorageVolumeUtil.getHddsVolumesList(
         hddsVolumeSet.getVolumesList());
-    long start = System.currentTimeMillis();
+    long start = Time.monotonicNow();
     for (HddsVolume volume : hddsVolumes) {
       futures.add(CompletableFuture.runAsync(
           () -> loadVolume(volume, readOnly, logger)));
@@ -99,7 +100,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
     }
     if (logger != null) {
       logger.info("Load {} volumes DbStore cost: {}ms", hddsVolumes.size(),
-          System.currentTimeMillis() - start);
+          Time.monotonicNow() - start);
     }
   }

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java

Lines changed: 3 additions & 2 deletions
@@ -96,6 +96,7 @@
 import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.slf4j.Logger;
@@ -318,7 +319,7 @@ public void buildContainerSet() throws IOException {
     Iterator<StorageVolume> volumeSetIterator = volumeSet.getVolumesList()
         .iterator();
     ArrayList<Thread> volumeThreads = new ArrayList<>();
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();

     // Load container inspectors that may be triggered at startup based on
     // system properties set. These can inspect and possibly repair
@@ -360,7 +361,7 @@ public void buildContainerSet() throws IOException {
     ContainerInspectorUtil.unload();

     LOG.info("Build ContainerSet costs {}s",
-        (System.currentTimeMillis() - startTime) / 1000);
+        (Time.monotonicNow() - startTime) / 1000);
   }

   /**

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java

Lines changed: 2 additions & 1 deletion
@@ -68,6 +68,7 @@
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -442,7 +443,7 @@ public void testDeleteContainerTimeout() throws IOException {
     final ContainerSet containerSet = newContainerSet();
     final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
     final Clock clock = mock(Clock.class);
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();

     DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
     when(clock.millis())

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java

Lines changed: 3 additions & 2 deletions
@@ -68,6 +68,7 @@
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+import org.apache.hadoop.util.Time;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.apache.ratis.util.FileUtils;
 import org.junit.jupiter.api.AfterEach;
@@ -450,15 +451,15 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
          (HddsVolume) volumes.get(i), containerSet, conf, true);
       threads[i] = new Thread(containerReaders[i]);
     }
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     for (int i = 0; i < volumeNum; i++) {
       threads[i].start();
     }
     for (int i = 0; i < volumeNum; i++) {
       threads[i].join();
     }
     System.out.println("Open " + volumeNum + " Volume with " + containerCount +
-        " costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
+        " costs " + (Time.monotonicNow() - startTime) / 1000 + "s");
     assertEquals(containerCount,
         containerSet.getContainerMap().entrySet().size());
     assertEquals(volumeSet.getFailedVolumesList().size(), 0);

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java

Lines changed: 3 additions & 2 deletions
@@ -28,6 +28,7 @@
 import java.util.UUID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Test;

 /**
@@ -104,7 +105,7 @@ public void test() throws InterruptedException {

     ReplicationSupervisor rs = ReplicationSupervisor.newBuilder().build();

-    final long start = System.currentTimeMillis();
+    final long start = Time.monotonicNow();

     //schedule 100 container replication
     for (int i = 0; i < 100; i++) {
@@ -114,7 +115,7 @@ public void test() throws InterruptedException {
       rs.addTask(new ReplicationTask(fromSources(i, sources), replicator));
     }
     rs.shutdownAfterFinish();
-    final long executionTime = System.currentTimeMillis() - start;
+    final long executionTime = Time.monotonicNow() - start;
     System.out.println(executionTime);
     assertThat(executionTime)
         .withFailMessage("Execution was too slow : " + executionTime + " ms")

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java

Lines changed: 3 additions & 2 deletions
@@ -33,6 +33,7 @@
 import org.apache.hadoop.ozone.lease.LeaseExpiredException;
 import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -116,7 +117,7 @@ private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
       EventPublisher publisher) {
     metrics.incrementTrackedEvents();
     long identifier = payload.getId();
-    startTrackingTimes.put(identifier, System.currentTimeMillis());
+    startTrackingTimes.put(identifier, Time.monotonicNow());

     trackedEventsByID.put(identifier, payload);
     trackedEvents.add(payload);
@@ -139,7 +140,7 @@ protected synchronized void handleCompletion(COMPLETION_PAYLOAD
     if (trackedEvents.remove(payload)) {
       metrics.incrementCompletedEvents();
       long originalTime = startTrackingTimes.remove(id);
-      metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
+      metrics.updateFinishingTime(Time.monotonicNow() - originalTime);
       onFinished(publisher, payload);
     }
   }

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java

Lines changed: 3 additions & 2 deletions
@@ -28,6 +28,7 @@
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.util.UncheckedAutoCloseable;

 /**
@@ -68,12 +69,12 @@ public void increment(KEY key, long duration) {
   }

   public UncheckedAutoCloseable measure(KEY key) {
-    final long startTime = System.currentTimeMillis();
+    final long startTime = Time.monotonicNow();
     concurrency.incrementAndGet();
     return () -> {
       concurrency.decrementAndGet();
       counters.get(key).incrementAndGet();
-      elapsedTimes.get(key).addAndGet(System.currentTimeMillis() - startTime);
+      elapsedTimes.get(key).addAndGet(Time.monotonicNow() - startTime);
     };
   }

0 commit comments
