
Commit 5c74eb1

Merge #517
517: Revert "Require three epoch advancements for deallocation" r=jeehoonkang a=jeehoonkang

Reverts #416. I suspect that PR may be the cause of #514. Here's a revert PR, and I'd like to test whether the SIGSEGV is gone after reverting it...

Co-authored-by: Jeehoon Kang <[email protected]>
2 parents 3156037 + 044d666 commit 5c74eb1

2 files changed: +43, -52 lines

crossbeam-epoch/src/internal.rs (+43, -51)

@@ -228,9 +228,9 @@ unsafe impl Sync for SealedBag {}
 impl SealedBag {
     /// Checks if it is safe to drop the bag w.r.t. the given global epoch.
     fn is_expired(&self, global_epoch: Epoch) -> bool {
-        // A pinned participant can witness at most two epoch advancement. Therefore, any bag that
-        // is within two epoch of the current one cannot be destroyed yet.
-        global_epoch.wrapping_sub(self.epoch) >= 3
+        // A pinned participant can witness at most one epoch advancement. Therefore, any bag that
+        // is within one epoch of the current one cannot be destroyed yet.
+        global_epoch.wrapping_sub(self.epoch) >= 2
     }
 }
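
The hunk above is the substance of the revert: with #416 a sealed bag had to be at least three epochs old before it could be dropped; after the revert, two epochs suffice, on the assumption that a pinned participant can lag the global epoch by at most one advancement. Below is a minimal sketch of that arithmetic, using plain u64 counters instead of crossbeam's `Epoch` type (which additionally tracks a pinned flag); the representation here is an assumption for illustration only.

// Illustrative stand-in for `SealedBag::is_expired`, using plain integers.
fn is_expired(sealed_at: u64, global_epoch: u64) -> bool {
    // Safe to reclaim once the global epoch is at least two steps past the
    // epoch the bag was sealed with.
    global_epoch.wrapping_sub(sealed_at) >= 2
}

fn main() {
    assert!(!is_expired(10, 10)); // same epoch: participants may still hold references
    assert!(!is_expired(10, 11)); // one step behind: a pinned participant may still see the data
    assert!(is_expired(10, 12));  // two steps behind: no pinned participant can still see it
    assert!(is_expired(u64::MAX, 1)); // wrapping_sub keeps the rule correct across wrap-around
}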

@@ -261,8 +261,12 @@ impl Global {
     }

     /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
-    pub fn push_bag(&self, bag: &mut Bag, epoch: Epoch, guard: &Guard) {
+    pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
         let bag = mem::replace(bag, Bag::new());
+
+        atomic::fence(Ordering::SeqCst);
+
+        let epoch = self.epoch.load(Ordering::Relaxed);
         self.queue.push(bag.seal(epoch), guard);
     }
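
With this change, callers of `push_bag` no longer pass in their local epoch. Instead, `push_bag` issues a `SeqCst` fence and then reads the global epoch itself, so the bag is sealed with an epoch observed after all of its deferred functions were recorded; the call sites further down are updated accordingly. Here is a simplified sketch of that flow, using standard-library atomics and plain `Vec`s in place of crossbeam's internal `Bag`, `SealedBag`, and lock-free queue (these stand-ins are assumptions, not the real types):

use std::sync::atomic::{fence, AtomicU64, Ordering};

// Stand-ins for crossbeam's internal bag and queue types.
struct SealedBag {
    epoch: u64,
    deferred: Vec<Box<dyn FnOnce()>>,
}

fn push_bag(
    global_epoch: &AtomicU64,
    queue: &mut Vec<SealedBag>,
    bag: &mut Vec<Box<dyn FnOnce()>>,
) {
    // Replace the local bag with a fresh, empty one.
    let deferred = std::mem::take(bag);
    // Order everything already recorded in the bag before the epoch read below.
    fence(Ordering::SeqCst);
    // Seal the bag with the freshly observed global epoch and enqueue it.
    let epoch = global_epoch.load(Ordering::Relaxed);
    queue.push(SealedBag { epoch, deferred });
}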

@@ -426,8 +430,7 @@ impl Local {
         let bag = &mut *self.bag.get();

         while let Err(d) = bag.try_push(deferred) {
-            let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
-            self.global().push_bag(bag, epoch, guard);
+            self.global().push_bag(bag, guard);
             deferred = d;
         }
     }
@@ -436,8 +439,7 @@ impl Local {
         let bag = unsafe { &mut *self.bag.get() };

         if !bag.is_empty() {
-            let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
-            self.global().push_bag(bag, epoch, guard);
+            self.global().push_bag(bag, guard);
         }

         self.global().collect(guard);
@@ -452,47 +454,38 @@ impl Local {
         self.guard_count.set(guard_count.checked_add(1).unwrap());

         if guard_count == 0 {
-            // Now we must store the global epoch into `self.epoch` and execute a `SeqCst` fence.
-            // The fence makes sure that any future loads from `Atomic`s will not happen before this
-            // store.
-            let mut current = Epoch::starting();
-            let mut new = self.global().epoch.load(Ordering::Relaxed).pinned();
-
-            loop {
-                if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
-                    // HACK(stjepang): On x86 architectures there are two different ways of
-                    // executing a `SeqCst` fence.
-                    //
-                    // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
-                    // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
-                    //    instruction.
-                    //
-                    // Both instructions have the effect of a full barrier, but benchmarks have
-                    // shown that the second one makes pinning faster in this particular case. It
-                    // is not clear that this is permitted by the C++ memory model (SC fences work
-                    // very differently from SC accesses), but experimental evidence suggests that
-                    // this works fine. Using inline assembly would be a viable (and correct)
-                    // alternative, but alas, that is not possible on stable Rust.
-                    let previous = self.epoch.compare_and_swap(current, new, Ordering::SeqCst);
-                    debug_assert_eq!(current, previous, "participant was expected to be unpinned");
-
-                    // We add a compiler fence to make it less likely for LLVM to do something wrong
-                    // here. Formally, this is not enough to get rid of data races; practically, it
-                    // should go a long way.
-                    atomic::compiler_fence(Ordering::SeqCst);
-                } else {
-                    self.epoch.store(new, Ordering::Relaxed);
-                    atomic::fence(Ordering::SeqCst);
-                }
-
-                // Now we validate that the value we read from the global epoch is not stale.
-                let validation = self.global().epoch.load(Ordering::Relaxed).pinned();
-                if new == validation {
-                    break;
-                }
-
-                current = new;
-                new = validation;
+            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
+            let new_epoch = global_epoch.pinned();
+
+            // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
+            // The fence makes sure that any future loads from `Atomic`s will not happen before
+            // this store.
+            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+                // HACK(stjepang): On x86 architectures there are two different ways of executing
+                // a `SeqCst` fence.
+                //
+                // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
+                // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
+                //    instruction.
+                //
+                // Both instructions have the effect of a full barrier, but benchmarks have shown
+                // that the second one makes pinning faster in this particular case. It is not
+                // clear that this is permitted by the C++ memory model (SC fences work very
+                // differently from SC accesses), but experimental evidence suggests that this
+                // works fine. Using inline assembly would be a viable (and correct) alternative,
+                // but alas, that is not possible on stable Rust.
+                let current = Epoch::starting();
+                let previous = self
+                    .epoch
+                    .compare_and_swap(current, new_epoch, Ordering::SeqCst);
+                debug_assert_eq!(current, previous, "participant was expected to be unpinned");
+                // We add a compiler fence to make it less likely for LLVM to do something wrong
+                // here. Formally, this is not enough to get rid of data races; practically,
+                // it should go a long way.
+                atomic::compiler_fence(Ordering::SeqCst);
+            } else {
+                self.epoch.store(new_epoch, Ordering::Relaxed);
+                atomic::fence(Ordering::SeqCst);
             }

             // Increment the pin counter.
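
The HACK comment carries over from the old code: on x86 and x86_64, a `SeqCst` read-modify-write (compiled to `lock cmpxchg`) has the effect of a full barrier and benchmarked faster here than an explicit `mfence`. A standalone sketch of the two variants, using a plain `AtomicUsize` and the non-deprecated `compare_exchange` instead of crossbeam's `AtomicEpoch` and `compare_and_swap`, might look like this:

use std::sync::atomic::{compiler_fence, fence, AtomicUsize, Ordering};

// Variant 1: an explicit SeqCst fence (an `mfence` instruction on x86_64).
fn publish_with_fence(slot: &AtomicUsize, value: usize) {
    slot.store(value, Ordering::Relaxed);
    fence(Ordering::SeqCst);
}

// Variant 2: a SeqCst read-modify-write (a `lock cmpxchg` on x86_64), which also
// acts as a full barrier. The compiler fence discourages the compiler from
// reordering around it, mirroring the comment in the diff above.
fn publish_with_rmw(slot: &AtomicUsize, expected: usize, value: usize) {
    let previous = slot.compare_exchange(expected, value, Ordering::SeqCst, Ordering::SeqCst);
    debug_assert_eq!(previous, Ok(expected));
    compiler_fence(Ordering::SeqCst);
}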
@@ -580,9 +573,8 @@ impl Local {
             unsafe {
                 // Pin and move the local bag into the global queue. It's important that `push_bag`
                 // doesn't defer destruction on any new garbage.
-                let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
                 let guard = &self.pin();
-                self.global().push_bag(&mut *self.bag.get(), epoch, guard);
+                self.global().push_bag(&mut *self.bag.get(), guard);
             }
             // Revert the handle count back to zero.
             self.handle_count.set(0);
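
For context, all of this sits behind crossbeam-epoch's pin/defer interface; the reverted change only affects when the collector considers a sealed bag reclaimable. A small usage sketch, assuming the crossbeam-epoch crate as a dependency and its public `pin`, `Guard::defer`, and `Guard::flush` API:

use crossbeam_epoch as epoch;

fn main() {
    // Pin the current thread. While the guard is alive, this participant can lag
    // the global epoch by at most one advancement, which is what `is_expired` relies on.
    let guard = epoch::pin();

    // Defer a closure. It lands in the thread-local bag, which is eventually
    // sealed with an epoch and moved to the global queue by `push_bag`.
    guard.defer(|| println!("reclaimed"));

    // Unpin, then ask the collector to push the local bag right away instead of
    // waiting for it to fill up.
    drop(guard);
    epoch::pin().flush();
}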

crossbeam-skiplist/tests/base.rs (-1)

@@ -806,7 +806,6 @@ fn drops() {
         drop(s);
     }

-    handle.pin().flush();
     handle.pin().flush();
     handle.pin().flush();
     assert_eq!(KEYS.load(Ordering::SeqCst), 8);
