@@ -228,9 +228,9 @@ unsafe impl Sync for SealedBag {}
impl SealedBag {
    /// Checks if it is safe to drop the bag w.r.t. the given global epoch.
    fn is_expired(&self, global_epoch: Epoch) -> bool {
-        // A pinned participant can witness at most two epoch advancement. Therefore, any bag that
-        // is within two epoch of the current one cannot be destroyed yet.
-        global_epoch.wrapping_sub(self.epoch) >= 3
+        // A pinned participant can witness at most one epoch advancement. Therefore, any bag that
+        // is within one epoch of the current one cannot be destroyed yet.
+        global_epoch.wrapping_sub(self.epoch) >= 2
    }
}

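For readers skimming the diff, here is a minimal sketch of the expiration rule the new comment states, using bare `usize` counters instead of the crate's `Epoch` type (the pinned-bit representation and the real advancement logic are deliberately omitted, and `is_expired` here is a free function rather than the crate's method):

```rust
/// Simplified model of the rule above: epochs are bare `usize` counters that
/// may wrap around, and a sealed bag records the epoch it was sealed in.
fn is_expired(sealed_epoch: usize, global_epoch: usize) -> bool {
    // A pinned participant can witness at most one epoch advancement, so a bag
    // becomes safe to destroy only once it lags the global epoch by two or more.
    global_epoch.wrapping_sub(sealed_epoch) >= 2
}

fn main() {
    assert!(!is_expired(10, 10)); // sealed in the current epoch
    assert!(!is_expired(10, 11)); // one advancement: a pinned thread may still hold references
    assert!(is_expired(10, 12)); // two advancements: no pinned thread can still observe it
    assert!(is_expired(usize::MAX, 1)); // wrapping subtraction handles counter overflow
}
```

Under the removed rule the threshold was `>= 3`, matching the "at most two advancements" assumption stated in the old comment.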
@@ -261,8 +261,12 @@ impl Global {
    }

    /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
-    pub fn push_bag(&self, bag: &mut Bag, epoch: Epoch, guard: &Guard) {
+    pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
        let bag = mem::replace(bag, Bag::new());
+
+        atomic::fence(Ordering::SeqCst);
+
+        let epoch = self.epoch.load(Ordering::Relaxed);
        self.queue.push(bag.seal(epoch), guard);
    }

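A rough standalone sketch of the fence-then-load ordering the new `push_bag` uses, written against `std::sync::atomic` only; `GLOBAL_EPOCH`, `seal`, and the `Vec<u8>` stand-in for a garbage bag are placeholders for this sketch, not the crate's types:

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Placeholder for the global epoch counter (the crate uses its own `AtomicEpoch`).
static GLOBAL_EPOCH: AtomicUsize = AtomicUsize::new(0);

/// Tags a full bag of garbage with an epoch, mirroring the ordering above:
/// publish the garbage first, fence, and only then read the epoch to seal with.
fn seal(garbage: Vec<u8>) -> (Vec<u8>, usize) {
    // Everything that made `garbage` unreachable is ordered before this fence.
    fence(Ordering::SeqCst);
    // The epoch read here is therefore no older than those removals, so a
    // relaxed load suffices; the expiration check can then wait the right
    // number of advancements before freeing the bag.
    let epoch = GLOBAL_EPOCH.load(Ordering::Relaxed);
    (garbage, epoch)
}

fn main() {
    let (_bag, epoch) = seal(vec![1, 2, 3]);
    println!("sealed at epoch {}", epoch);
}
```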
@@ -426,8 +430,7 @@ impl Local {
        let bag = &mut *self.bag.get();

        while let Err(d) = bag.try_push(deferred) {
-            let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
-            self.global().push_bag(bag, epoch, guard);
+            self.global().push_bag(bag, guard);
            deferred = d;
        }
    }
@@ -436,8 +439,7 @@ impl Local {
        let bag = unsafe { &mut *self.bag.get() };

        if !bag.is_empty() {
-            let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
-            self.global().push_bag(bag, epoch, guard);
+            self.global().push_bag(bag, guard);
        }

        self.global().collect(guard);
@@ -452,47 +454,38 @@ impl Local {
        self.guard_count.set(guard_count.checked_add(1).unwrap());

        if guard_count == 0 {
-            // Now we must store the global epoch into `self.epoch` and execute a `SeqCst` fence.
-            // The fence makes sure that any future loads from `Atomic`s will not happen before this
-            // store.
-            let mut current = Epoch::starting();
-            let mut new = self.global().epoch.load(Ordering::Relaxed).pinned();
-
-            loop {
-                if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-                    // HACK(stjepang): On x86 architectures there are two different ways of
-                    // executing a `SeqCst` fence.
-                    //
-                    // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
-                    // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
-                    //    instruction.
-                    //
-                    // Both instructions have the effect of a full barrier, but benchmarks have
-                    // shown that the second one makes pinning faster in this particular case. It
-                    // is not clear that this is permitted by the C++ memory model (SC fences work
-                    // very differently from SC accesses), but experimental evidence suggests that
-                    // this works fine. Using inline assembly would be a viable (and correct)
-                    // alternative, but alas, that is not possible on stable Rust.
-                    let previous = self.epoch.compare_and_swap(current, new, Ordering::SeqCst);
-                    debug_assert_eq!(current, previous, "participant was expected to be unpinned");
-
-                    // We add a compiler fence to make it less likely for LLVM to do something wrong
-                    // here. Formally, this is not enough to get rid of data races; practically, it
-                    // should go a long way.
-                    atomic::compiler_fence(Ordering::SeqCst);
-                } else {
-                    self.epoch.store(new, Ordering::Relaxed);
-                    atomic::fence(Ordering::SeqCst);
-                }
-
-                // Now we validate that the value we read from the global epoch is not stale.
-                let validation = self.global().epoch.load(Ordering::Relaxed).pinned();
-                if new == validation {
-                    break;
-                }
-
-                current = new;
-                new = validation;
+            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
+            let new_epoch = global_epoch.pinned();
+
+            // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
+            // The fence makes sure that any future loads from `Atomic`s will not happen before
+            // this store.
+            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+                // HACK(stjepang): On x86 architectures there are two different ways of executing
+                // a `SeqCst` fence.
+                //
+                // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
+                // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
+                //    instruction.
+                //
+                // Both instructions have the effect of a full barrier, but benchmarks have shown
+                // that the second one makes pinning faster in this particular case. It is not
+                // clear that this is permitted by the C++ memory model (SC fences work very
+                // differently from SC accesses), but experimental evidence suggests that this
+                // works fine. Using inline assembly would be a viable (and correct) alternative,
+                // but alas, that is not possible on stable Rust.
+                let current = Epoch::starting();
+                let previous = self
+                    .epoch
+                    .compare_and_swap(current, new_epoch, Ordering::SeqCst);
+                debug_assert_eq!(current, previous, "participant was expected to be unpinned");
+                // We add a compiler fence to make it less likely for LLVM to do something wrong
+                // here. Formally, this is not enough to get rid of data races; practically,
+                // it should go a long way.
+                atomic::compiler_fence(Ordering::SeqCst);
+            } else {
+                self.epoch.store(new_epoch, Ordering::Relaxed);
+                atomic::fence(Ordering::SeqCst);
            }

            // Increment the pin counter.
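As a hedged illustration of the two x86 fencing strategies the comment describes, here is a standalone sketch using a plain `AtomicUsize` in place of the crate's `AtomicEpoch`; `publish_epoch` and `UNPINNED` are made-up names for this sketch, and std's `compare_exchange` stands in for the older `compare_and_swap`:

```rust
use std::sync::atomic::{compiler_fence, fence, AtomicUsize, Ordering};

// Placeholder for the "not pinned" value (`Epoch::starting()` in the crate).
const UNPINNED: usize = 0;

/// Publishes a participant's pinned epoch and then acts as a full barrier.
fn publish_epoch(slot: &AtomicUsize, new_epoch: usize) {
    if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
        // Strategy 2: a SeqCst read-modify-write compiles to `lock cmpxchg`,
        // which is itself a full barrier on x86.
        let previous =
            slot.compare_exchange(UNPINNED, new_epoch, Ordering::SeqCst, Ordering::SeqCst);
        debug_assert_eq!(previous, Ok(UNPINNED), "participant was expected to be unpinned");
        // Discourage the compiler from moving later loads above the RMW.
        compiler_fence(Ordering::SeqCst);
    } else {
        // Strategy 1: a relaxed store followed by an explicit SeqCst fence
        // (`mfence` on x86, the appropriate full barrier elsewhere).
        slot.store(new_epoch, Ordering::Relaxed);
        fence(Ordering::SeqCst);
    }
}

fn main() {
    let slot = AtomicUsize::new(UNPINNED);
    publish_epoch(&slot, 42);
    assert_eq!(slot.load(Ordering::Relaxed), 42);
}
```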
@@ -580,9 +573,8 @@ impl Local {
        unsafe {
            // Pin and move the local bag into the global queue. It's important that `push_bag`
            // doesn't defer destruction on any new garbage.
-            let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
            let guard = &self.pin();
-            self.global().push_bag(&mut *self.bag.get(), epoch, guard);
+            self.global().push_bag(&mut *self.bag.get(), guard);
        }
        // Revert the handle count back to zero.
        self.handle_count.set(0);