@@ -341,10 +341,12 @@ void fcsm::ctgtq_cleanup()
 void fcsm::ensure_commit(uint64_t write_off,
                          uint64_t write_len,
                          struct rpc_task *task,
-                         std::atomic<bool> *conditional_variable)
+                         std::atomic<bool> *conditional_variable,
+                         bool commit_full)
 {
     assert(inode->is_flushing);
     assert(!inode->is_stable_write());
+    assert(!commit_full || task == nullptr);

     /*
      * If any of the flush/commit targets are waiting completion, state machine
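
The new commit_full flag is meant for callers that need everything (both commit-pending and still-dirty bytes) durably committed before they can make progress; such callers wait on the atomic flag rather than on an rpc_task, which is what the new assert enforces. A minimal caller sketch, assuming a hypothetical get_fcsm() accessor on the inode and a simple polling wait (illustrative only, not code from this change):

#include <atomic>
#include <chrono>
#include <thread>

void full_commit_barrier_sketch(struct nfs_inode *inode)
{
    // Waiter flag that the state machine sets once the commit target
    // (and, for commit_full, the trailing flush target) completes.
    std::atomic<bool> done{false};

    // commit_full callers must not pass an rpc_task (see the assert above);
    // get_fcsm() is an assumed accessor for the flush/commit state machine.
    inode->get_fcsm()->ensure_commit(0 /* write_off */,
                                     0 /* write_len */,
                                     nullptr /* task */,
                                     &done,
                                     true /* commit_full */);

    // Simple polling wait; the real caller may block differently.
    while (!done) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
}
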
@@ -383,13 +385,23 @@ void fcsm::ensure_commit(uint64_t write_off,
     uint64_t commit_bytes =
         inode->get_filecache()->get_bytes_to_commit();

+    /*
+     * If the commit_full flag is true, wait_for_ongoing_flush() has already
+     * committed the commit_pending bytes. Now we need to flush the dirty
+     * bytes and commit them as well.
+     */
+    if (commit_full) {
+        assert(commit_bytes == 0);
+        commit_bytes = inode->get_filecache()->get_bytes_to_flush();
+    }
+
     if (commit_bytes == 0) {
         /*
          * TODO: Make sure this doesn't result in small-blocks being written.
          */
-        const int64_t bytes =
-            inode->get_filecache()->get_bytes_to_flush() -
-            inode->get_filecache()->max_dirty_extent_bytes();
+        const int64_t bytes = (inode->get_filecache()->get_bytes_to_flush() -
+                               inode->get_filecache()->max_dirty_extent_bytes());
+
         commit_bytes = std::max(bytes, (int64_t) 0);
     }

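
To make the byte selection above concrete: normally the state machine commits whatever is already flushed but uncommitted; with commit_full it targets every dirty byte; and when nothing is commit-pending it commits only the dirty bytes that exceed one max dirty extent. The same decision restated as a standalone helper, with plain integers standing in for the filecache accessors (a sketch, not project code):

#include <algorithm>
#include <cstdint>

// bytes_to_commit  ~ get_bytes_to_commit()
// bytes_to_flush   ~ get_bytes_to_flush()
// max_dirty_extent ~ max_dirty_extent_bytes()
uint64_t pick_commit_bytes(uint64_t bytes_to_commit,
                           uint64_t bytes_to_flush,
                           uint64_t max_dirty_extent,
                           bool commit_full)
{
    uint64_t commit_bytes = bytes_to_commit;

    // commit_full: everything dirty must be flushed and committed.
    if (commit_full) {
        commit_bytes = bytes_to_flush;
    }

    // Nothing commit-pending: commit only the dirty bytes beyond one
    // full dirty extent, never a negative amount.
    if (commit_bytes == 0) {
        const int64_t bytes =
            (int64_t) bytes_to_flush - (int64_t) max_dirty_extent;
        commit_bytes = (uint64_t) std::max(bytes, (int64_t) 0);
    }

    return commit_bytes;
}
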
@@ -477,11 +489,21 @@ void fcsm::ensure_commit(uint64_t write_off,
         ctgtq.emplace(this,
                       0 /* target flush_seq */,
                       target_committed_seq_num /* target commit_seq */,
-                      task);
+                      task,
+                      conditional_variable);
     } else {
         if (task) {
             task->reply_write(task->rpc_api->write_task.get_size());
         }
+
+        /*
+         * flush_cache_and_wait() is waiting for the dirty bytes to be
+         * flushed and cannot complete until they all are, so add the
+         * flush target.
+         */
+        if (commit_full) {
+            ensure_flush(0, 0, nullptr, conditional_variable);
+        }
     }

     return;
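
A queued target therefore has to carry both possible waiters: an rpc_task to reply to, or an atomic flag (as used by the flush_cache_and_wait() path) to set. The real fctgt/ctgtq types are not part of this hunk; the following struct is only an illustration of the completion pattern the diff relies on, with assumed member names:

#include <atomic>
#include <cstdint>

struct commit_target_sketch {
    struct rpc_task *task = nullptr;   // writer waiting for reply_write()
    std::atomic<bool> *cv = nullptr;   // flush_cache_and_wait()-style waiter
    uint64_t target_commit_seq = 0;    // commit sequence this target waits for

    // Called once committed_seq_num reaches target_commit_seq.
    void complete()
    {
        if (task) {
            task->reply_write(task->rpc_api->write_task.get_size());
        }
        if (cv) {
            *cv = true;                // wake the waiting caller
        }
    }
};
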
@@ -514,6 +536,12 @@ void fcsm::ensure_commit(uint64_t write_off,

     assert(committing_seq_num == (prev_committing_seq_num + bytes));
     assert(committing_seq_num > committed_seq_num);
+
+    /*
+     * Enqueue this target; on_commit_callback() will complete
+     * it. Otherwise the task/conditional_variable waiting for
+     * this target would never be completed.
+     */
     ctgtq.emplace(this,
                   0 /* target flush_seq */,
                   target_committed_seq_num /* target commit_seq */,
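
For the waiter to ever be released, on_commit_callback() (not shown in this diff) has to walk ctgtq and complete every target whose commit sequence has been reached, roughly along these lines, reusing the commit_target_sketch type from the sketch above (names assumed, illustrative only):

#include <cstdint>
#include <queue>

// Drain all commit targets that the latest commit has satisfied.
void drain_commit_targets_sketch(std::queue<commit_target_sketch> &ctgtq,
                                 uint64_t committed_seq_num)
{
    while (!ctgtq.empty() &&
           ctgtq.front().target_commit_seq <= committed_seq_num) {
        ctgtq.front().complete();  // reply to the task or set the atomic flag
        ctgtq.pop();
    }
}
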
@@ -675,13 +703,21 @@ void fcsm::ensure_flush(uint64_t write_off,
     const uint64_t flushing_seq_num_before = flushing_seq_num;
     assert(flushed_seq_num <= flushing_seq_num);

-    // sync_membufs() will update flushing_seq_num() and mark fcsm running.
+    /*
+     * sync_membufs() will update flushing_seq_num() and mark fcsm running.
+     * The task is not passed to sync_membufs() but enqueued to ftgtq.
+     */
     inode->sync_membufs(bc_vec, false /* is_flush */, nullptr);

     assert(is_running());
     assert(flushing_seq_num == (flushing_seq_num_before + bytes));
     assert(flushed_seq_num <= flushing_seq_num);

+    /*
+     * Enqueue this target; on_flush_complete() will complete
+     * it. Otherwise the task/conditional_variable waiting for
+     * this target would never be completed.
+     */
     ftgtq.emplace(this,
                   target_flushed_seq_num /* target flush_seq */,
                   0 /* commit_seq */,
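
The flush side mirrors the commit side: flushing_seq_num advances when membufs are handed to sync_membufs(), flushed_seq_num advances when those flushes complete, and a flush target is done once flushed_seq_num catches up with its target. A small self-contained sketch of that bookkeeping, matching the asserts above (illustrative, not project code):

#include <cassert>
#include <cstdint>

struct flush_seq_sketch {
    uint64_t flushing_seq_num = 0;  // bytes handed to sync_membufs()
    uint64_t flushed_seq_num = 0;   // bytes whose flush has completed

    void issue(uint64_t bytes)
    {
        flushing_seq_num += bytes;  // flush issued, not yet complete
    }

    void complete(uint64_t bytes)
    {
        flushed_seq_num += bytes;   // flush completion callback
        assert(flushed_seq_num <= flushing_seq_num);
    }

    // A queued flush target completes once flushed_seq_num reaches it.
    bool target_done(uint64_t target_flushed_seq_num) const
    {
        return flushed_seq_num >= target_flushed_seq_num;
    }
};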