-use core::intrinsics;
+use core::arch;
use core::mem;
+use core::sync::atomic::{AtomicU32, Ordering};

// Kernel-provided user-mode helper functions:
// https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
unsafe fn __kuser_cmpxchg(oldval: u32, newval: u32, ptr: *mut u32) -> bool {
    let f: extern "C" fn(u32, u32, *mut u32) -> u32 = mem::transmute(0xffff0fc0usize as *const ());
    f(oldval, newval, ptr) == 0
}
+
unsafe fn __kuser_memory_barrier() {
    let f: extern "C" fn() = mem::transmute(0xffff0fa0usize as *const ());
    f();
@@ -54,13 +56,45 @@ fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
    (aligned & !(mask << shift)) | ((val & mask) << shift)
}

+/// Atomically loads the value at `ptr`. The size of `T` is how many of the bytes pointed to by
+/// `ptr` are in bounds: if `T` is smaller than 4, then inline ASM is used to bypass the Rust
+/// requirement that all pointer reads must be in bounds.
+///
+/// # Safety
+///
+/// `ptr` must be aligned and point to memory within a page that allows read access.
+/// If `T` has a size of 4, then `ptr` must be valid for a relaxed atomic read.
+unsafe fn atomic_load_aligned<T>(ptr: *mut u32) -> u32 {
+    if mem::size_of::<T>() == 4 {
+        // SAFETY: As `T` has a size of 4, the caller guarantees this is sound.
+        unsafe { AtomicU32::from_ptr(ptr).load(Ordering::Relaxed) }
+    } else {
+        // SAFETY:
+        // As all 4 bytes pointed to by `ptr` might not be dereferenceable due to being out of
+        // bounds when doing atomic operations on a `u8`/`i8`/`u16`/`i16`, inline ASM is used to
+        // avoid causing undefined behaviour. The `ldr` instruction does not touch the stack or
+        // flags, or write to memory, so `nostack`, `preserves_flags` and `readonly` are sound. The
+        // caller guarantees that `ptr` is aligned, as required by `ldr`.
+        unsafe {
+            let res: u32;
+            arch::asm!(
+                "ldr {res}, [{ptr}]",
+                ptr = in(reg) ptr,
+                res = lateout(reg) res,
+                options(nostack, preserves_flags, readonly)
+            );
+            res
+        }
+    }
+}
+
// Generic atomic read-modify-write operation
unsafe fn atomic_rmw<T, F: Fn(u32) -> u32, G: Fn(u32, u32) -> u32>(ptr: *mut T, f: F, g: G) -> u32 {
    let aligned_ptr = align_ptr(ptr);
    let (shift, mask) = get_shift_mask(ptr);

    loop {
-        let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr);
+        let curval_aligned = atomic_load_aligned::<T>(aligned_ptr);
        let curval = extract_aligned(curval_aligned, shift, mask);
        let newval = f(curval);
        let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
@@ -76,7 +110,7 @@ unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 {
    let (shift, mask) = get_shift_mask(ptr);

    loop {
-        let curval_aligned = intrinsics::atomic_load_unordered(aligned_ptr);
+        let curval_aligned = atomic_load_aligned::<T>(aligned_ptr);
        let curval = extract_aligned(curval_aligned, shift, mask);
        if curval != oldval {
            return curval;
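// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch above): how a sub-word value is
// pulled out of and spliced back into its containing aligned 32-bit word,
// mirroring the extract_aligned/insert_aligned steps used by `atomic_rmw` and
// `atomic_cmpxchg`. The concrete word, shift and mask values are hypothetical,
// assuming a `u8` at byte offset 1 on a little-endian target (the values
// `get_shift_mask` would produce for such a pointer).
fn main() {
    let aligned: u32 = 0xDDCC_BBAA; // aligned word containing the target byte 0xBB
    let (shift, mask): (u32, u32) = (8, 0xff);

    // Extract step: read the current byte out of the aligned word.
    let curval = (aligned >> shift) & mask;
    assert_eq!(curval, 0xBB);

    // Apply the read-modify-write closure, e.g. a wrapping add of 1.
    let newval = curval.wrapping_add(1) & mask;

    // Insert step: write the new byte back, leaving the other three bytes intact
    // (same formula as `insert_aligned` in the diff above).
    let newval_aligned = (aligned & !(mask << shift)) | ((newval & mask) << shift);
    assert_eq!(newval_aligned, 0xDDCC_BCAA);
}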