@@ -88,15 +88,15 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
   WriteContext write_context;
   bool ignore_missing_faimly = write_options.ignore_missing_column_families;
   if (writer.state == WriteThread::STATE_GROUP_LEADER) {
+    PERF_TIMER_STOP(write_pre_and_post_process_time);
+    PERF_TIMER_GUARD(write_delay_time);
     if (writer.callback && !writer.callback->AllowWriteBatching()) {
       write_thread_.WaitForMemTableWriters();
     }
     WriteThread::WriteGroup wal_write_group;
     LogContext log_context;
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
     writer.status =
         PreprocessWrite(write_options, &log_context, &write_context);
-    PERF_TIMER_START(write_pre_and_post_process_time);
 
     // This can set non-OK status if callback fail.
     last_batch_group_size_ =
@@ -132,7 +132,6 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
     RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
     RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
 
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
     if (!write_options.disableWAL) {
       PERF_TIMER_GUARD(write_wal_time);
       stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
@@ -163,14 +162,14 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
   bool is_leader_thread = false;
   WriteThread::WriteGroup memtable_write_group;
   if (writer.state == WriteThread::STATE_MEMTABLE_WRITER_LEADER) {
-    PERF_TIMER_GUARD(write_memtable_time);
     assert(writer.ShouldWriteToMemtable());
     write_thread_.EnterAsMemTableWriter(&writer, &memtable_write_group);
     assert(immutable_db_options_.allow_concurrent_memtable_write);
     if (memtable_write_group.size > 1) {
       is_leader_thread = true;
       write_thread_.LaunchParallelMemTableWriters(&memtable_write_group);
     } else {
+      PERF_TIMER_GUARD(write_memtable_time);
       auto version_set = versions_->GetColumnFamilySet();
       memtable_write_group.running.store(0);
       for (auto it = memtable_write_group.begin();
@@ -194,6 +193,7 @@ Status DBImpl::MultiBatchWriteImpl(const WriteOptions& write_options,
   }
   if (writer.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
     assert(writer.ShouldWriteToMemtable());
+    PERF_TIMER_GUARD(write_memtable_time);
     auto version_set = versions_->GetColumnFamilySet();
     WriteBatchInternal::AsyncInsertInto(
         &writer, writer.sequence, version_set, &flush_scheduler_,
@@ -640,8 +640,8 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
     LogContext log_context(!write_options.disableWAL && write_options.sync);
     // PreprocessWrite does its own perf timing.
     PERF_TIMER_STOP(write_pre_and_post_process_time);
+    PERF_TIMER_GUARD(write_delay_time);
     w.status = PreprocessWrite(write_options, &log_context, &write_context);
-    PERF_TIMER_START(write_pre_and_post_process_time);
 
     // This can set non-OK status if callback fail.
     last_batch_group_size_ =
@@ -678,8 +678,6 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
     RecordTick(stats_, BYTES_WRITTEN, total_byte_size);
     RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
 
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
-
     if (w.status.ok() && !write_options.disableWAL) {
       PERF_TIMER_GUARD(write_wal_time);
       stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
@@ -752,7 +750,7 @@ Status DBImpl::UnorderedWriteMemtable(const WriteOptions& write_options,
                                       WriteCallback* callback, uint64_t log_ref,
                                       SequenceNumber seq,
                                       const size_t sub_batch_cnt) {
-  PERF_TIMER_GUARD(write_pre_and_post_process_time);
+  PERF_TIMER_GUARD(write_memtable_time);
   StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);
 
   WriteThread::Writer w(write_options, my_batch, callback, log_ref,
@@ -824,6 +822,8 @@ Status DBImpl::WriteImplWALOnly(
   // else we are the leader of the write batch group
   assert(w.state == WriteThread::STATE_GROUP_LEADER);
 
+  PERF_TIMER_STOP(write_pre_and_post_process_time);
+  PERF_TIMER_GUARD(write_delay_time);
   if (publish_last_seq == kDoPublishLastSeq) {
     // Currently we only use kDoPublishLastSeq in unordered_write
     assert(immutable_db_options_.unordered_write);
@@ -884,8 +884,6 @@ Status DBImpl::WriteImplWALOnly(
   }
   RecordInHistogram(stats_, BYTES_PER_WRITE, total_byte_size);
 
-  PERF_TIMER_STOP(write_pre_and_post_process_time);
-
   PERF_TIMER_GUARD(write_wal_time);
   // LastAllocatedSequence is increased inside WriteToWAL under
   // wal_write_mutex_ to ensure ordered events in WAL
@@ -934,7 +932,6 @@ Status DBImpl::WriteImplWALOnly(
       status = SyncWAL();
     }
   }
-  PERF_TIMER_START(write_pre_and_post_process_time);
 
   if (!w.CallbackFailed()) {
     WriteStatusCheck(status);
@@ -1036,19 +1033,15 @@ Status DBImpl::PreprocessWrite(const WriteOptions& write_options,
   }
 
   PERF_TIMER_STOP(write_scheduling_flushes_compactions_time);
-  PERF_TIMER_GUARD(write_pre_and_post_process_time);
 
   if (UNLIKELY(status.ok() && (write_controller_.IsStopped() ||
                                write_controller_.NeedsDelay()))) {
-    PERF_TIMER_STOP(write_pre_and_post_process_time);
-    PERF_TIMER_GUARD(write_delay_time);
     // We don't know size of curent batch so that we always use the size
     // for previous one. It might create a fairness issue that expiration
     // might happen for smaller writes but larger writes can go through.
     // Can optimize it if it is an issue.
     InstrumentedMutexLock l(&mutex_);
     status = DelayWrite(last_batch_group_size_, write_options);
-    PERF_TIMER_START(write_pre_and_post_process_time);
   }
 
   InstrumentedMutexLock l(&log_write_mutex_);
@@ -1634,7 +1627,6 @@ Status DBImpl::ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
     // is that in case the write is heavy, low pri writes may never have
     // a chance to run. Now we guarantee we are still slowly making
     // progress.
-    PERF_TIMER_GUARD(write_delay_time);
     write_controller_.low_pri_rate_limiter()->Request(
         my_batch->GetDataSize(), Env::IO_HIGH, nullptr /* stats */,
         RateLimiter::OpType::kWrite);