// kernel/sync/atomic/predefine.rs
1// SPDX-License-Identifier: GPL-2.0
2
3//! Pre-defined atomic types
4
5use crate::static_assert;
6use core::mem::{align_of, size_of};
7use ffi::c_void;
8
// Ensure size and alignment requirements are checked at compile time: `bool` must match `i8`
// exactly for the `AtomicType` impl below to be sound.
static_assert!(size_of::<bool>() == size_of::<i8>());
static_assert!(align_of::<bool>() == align_of::<i8>());
12
// SAFETY: `bool` has the same size and alignment as `i8` (checked by the `static_assert!`s
// above), and Rust guarantees that `bool` has only two valid bit patterns: 0 (false) and
// 1 (true). Those are valid `i8` values, so `bool` is round-trip transmutable to `i8`.
unsafe impl super::AtomicType for bool {
    type Repr = i8;
}
19
// SAFETY: `i8` trivially has the same size and alignment as itself, and is round-trip
// transmutable to itself.
unsafe impl super::AtomicType for i8 {
    type Repr = i8;
}
25
// SAFETY: `i16` trivially has the same size and alignment as itself, and is round-trip
// transmutable to itself.
unsafe impl super::AtomicType for i16 {
    type Repr = i16;
}
31
// SAFETY:
//
// - `*mut T` has the same size and alignment as `*const c_void` (both are thin pointers since
//   `T: Sized`), and is round-trip transmutable to `*const c_void`.
// - `*mut T` is safe to transfer between execution contexts. See the safety requirement of
//   [`AtomicType`].
unsafe impl<T: Sized> super::AtomicType for *mut T {
    type Repr = *const c_void;
}
41
// SAFETY:
//
// - `*const T` has the same size and alignment as `*const c_void` (both are thin pointers since
//   `T: Sized`), and is round-trip transmutable to `*const c_void`.
// - `*const T` is safe to transfer between execution contexts. See the safety requirement of
//   [`AtomicType`].
unsafe impl<T: Sized> super::AtomicType for *const T {
    type Repr = *const c_void;
}
51
// SAFETY: `i32` trivially has the same size and alignment as itself, and is round-trip
// transmutable to itself.
unsafe impl super::AtomicType for i32 {
    type Repr = i32;
}
57
// SAFETY: The wrapping add result of two `i32`s is a valid `i32`, so atomic addition on the
// representation can never produce an invalid value.
unsafe impl super::AtomicAdd<i32> for i32 {
    fn rhs_into_delta(rhs: i32) -> i32 {
        // Value and representation coincide, so the delta is `rhs` itself.
        rhs
    }
}
64
// SAFETY: `i64` trivially has the same size and alignment as itself, and is round-trip
// transmutable to itself.
unsafe impl super::AtomicType for i64 {
    type Repr = i64;
}
70
// SAFETY: The wrapping add result of two `i64`s is a valid `i64`, so atomic addition on the
// representation can never produce an invalid value.
unsafe impl super::AtomicAdd<i64> for i64 {
    fn rhs_into_delta(rhs: i64) -> i64 {
        // Value and representation coincide, so the delta is `rhs` itself.
        rhs
    }
}
77
// Defines an internal type alias that always maps to the integer type with the same size and
// alignment as `isize` and `usize`; both `isize` and `usize` are always bi-directionally
// transmutable to `isize_atomic_repr`, which also always implements `AtomicImpl`.
//
// In kernel builds the pointer width is selected by `CONFIG_64BIT`; in `testlib` (host test)
// builds it is selected by the Rust target's `target_pointer_width` instead, since kernel
// config symbols are unavailable there.
#[allow(non_camel_case_types)]
#[cfg(not(testlib))]
#[cfg(not(CONFIG_64BIT))]
type isize_atomic_repr = i32;
#[allow(non_camel_case_types)]
#[cfg(not(testlib))]
#[cfg(CONFIG_64BIT)]
type isize_atomic_repr = i64;

#[allow(non_camel_case_types)]
#[cfg(testlib)]
#[cfg(target_pointer_width = "32")]
type isize_atomic_repr = i32;
#[allow(non_camel_case_types)]
#[cfg(testlib)]
#[cfg(target_pointer_width = "64")]
type isize_atomic_repr = i64;
98
// Ensure size and alignment requirements are checked at compile time: both `isize` and `usize`
// must match `isize_atomic_repr` exactly for the impls below to be sound.
static_assert!(size_of::<isize>() == size_of::<isize_atomic_repr>());
static_assert!(align_of::<isize>() == align_of::<isize_atomic_repr>());
static_assert!(size_of::<usize>() == size_of::<isize_atomic_repr>());
static_assert!(align_of::<usize>() == align_of::<isize_atomic_repr>());
104
// SAFETY: `isize` has the same size and alignment as `isize_atomic_repr` (checked by the
// `static_assert!`s above), and is round-trip transmutable to `isize_atomic_repr`.
unsafe impl super::AtomicType for isize {
    type Repr = isize_atomic_repr;
}
110
// SAFETY: The wrapping add result of two `isize_atomic_repr`s is a valid `isize`.
// (Note: this is the `isize` impl; the comment previously said `usize` in error.)
unsafe impl super::AtomicAdd<isize> for isize {
    fn rhs_into_delta(rhs: isize) -> isize_atomic_repr {
        // Bit-preserving cast: `isize` and `isize_atomic_repr` have the same width.
        rhs as isize_atomic_repr
    }
}
117
// SAFETY: `u32` and `i32` have the same size and alignment, and `u32` is round-trip
// transmutable to `i32`.
unsafe impl super::AtomicType for u32 {
    type Repr = i32;
}
123
// SAFETY: The wrapping add result of two `i32`s is a valid `u32`: every 32-bit pattern is a
// valid `u32`.
unsafe impl super::AtomicAdd<u32> for u32 {
    fn rhs_into_delta(rhs: u32) -> i32 {
        // Bit-preserving cast into the signed representation type.
        rhs as i32
    }
}
130
// SAFETY: `u64` and `i64` have the same size and alignment, and `u64` is round-trip
// transmutable to `i64`.
unsafe impl super::AtomicType for u64 {
    type Repr = i64;
}
136
// SAFETY: The wrapping add result of two `i64`s is a valid `u64`: every 64-bit pattern is a
// valid `u64`.
unsafe impl super::AtomicAdd<u64> for u64 {
    fn rhs_into_delta(rhs: u64) -> i64 {
        // Bit-preserving cast into the signed representation type.
        rhs as i64
    }
}
143
// SAFETY: `usize` has the same size and alignment as `isize_atomic_repr` (checked by the
// `static_assert!`s above), and is round-trip transmutable to `isize_atomic_repr`.
unsafe impl super::AtomicType for usize {
    type Repr = isize_atomic_repr;
}
149
// SAFETY: The wrapping add result of two `isize_atomic_repr`s is a valid `usize`: every
// pointer-width bit pattern is a valid `usize`.
unsafe impl super::AtomicAdd<usize> for usize {
    fn rhs_into_delta(rhs: usize) -> isize_atomic_repr {
        // Bit-preserving cast: `usize` and `isize_atomic_repr` have the same width.
        rhs as isize_atomic_repr
    }
}
156
157use crate::macros::kunit_tests;
158
// KUnit test suite exercising the predefined atomic types above through both the safe
// `Atomic<T>` API and the raw `atomic_*`/`xchg`/`cmpxchg` helpers on `as_ptr()`.
#[kunit_tests(rust_atomics)]
mod tests {
    use super::super::*;

    // Call $fn($val) with each $type of $val.
    //
    // Expands to one block per listed type, binding `let v: $type = $val;` and invoking
    // `$fn(v)`, so a single closure body is tested against every integer type.
    macro_rules! for_each_type {
        ($val:literal in [$($type:ty),*] $fn:expr) => {
            $({
                let v: $type = $val;

                $fn(v);
            })*
        }
    }

    // `new()` stores the initial value, observable via `load()` and via a raw `atomic_load`
    // on the underlying pointer.
    #[test]
    fn atomic_basic_tests() {
        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);

            assert_eq!(v, x.load(Relaxed));
        });

        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);
            let ptr = x.as_ptr();

            // SAFETY: `ptr` is a valid pointer and no concurrent access.
            assert_eq!(v, unsafe { atomic_load(ptr, Relaxed) });
        });
    }

    // A Release store must be observable by an Acquire load, through both APIs.
    #[test]
    fn atomic_acquire_release_tests() {
        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(0);

            x.store(v, Release);
            assert_eq!(v, x.load(Acquire));
        });

        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(0);
            let ptr = x.as_ptr();

            // SAFETY: `ptr` is a valid pointer and no concurrent access.
            unsafe { atomic_store(ptr, v, Release) };

            // SAFETY: `ptr` is a valid pointer and no concurrent access.
            assert_eq!(v, unsafe { atomic_load(ptr, Acquire) });
        });
    }

    // `xchg` returns the previous value and installs the new one.
    #[test]
    fn atomic_xchg_tests() {
        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);

            let old = v;
            let new = v + 1;

            assert_eq!(old, x.xchg(new, Full));
            assert_eq!(new, x.load(Relaxed));
        });

        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);
            let ptr = x.as_ptr();

            let old = v;
            let new = v + 1;

            // SAFETY: `ptr` is a valid pointer and no concurrent access.
            assert_eq!(old, unsafe { xchg(ptr, new, Full) });
            assert_eq!(new, x.load(Relaxed));
        });
    }

    // `cmpxchg` fails (returning `Err(current)`) on a mismatched expected value and leaves the
    // atomic untouched; it succeeds (returning `Ok(old)`) on a match.
    #[test]
    fn atomic_cmpxchg_tests() {
        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);

            let old = v;
            let new = v + 1;

            assert_eq!(Err(old), x.cmpxchg(new, new, Full));
            assert_eq!(old, x.load(Relaxed));
            assert_eq!(Ok(old), x.cmpxchg(old, new, Relaxed));
            assert_eq!(new, x.load(Relaxed));
        });

        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);
            let ptr = x.as_ptr();

            let old = v;
            let new = v + 1;

            // SAFETY: `ptr` is a valid pointer and no concurrent access.
            assert_eq!(Err(old), unsafe { cmpxchg(ptr, new, new, Full) });
            assert_eq!(old, x.load(Relaxed));
            // SAFETY: `ptr` is a valid pointer and no concurrent access.
            assert_eq!(Ok(old), unsafe { cmpxchg(ptr, old, new, Relaxed) });
            assert_eq!(new, x.load(Relaxed));
        });
    }

    // `fetch_add` returns the pre-add value; `add` updates in place. Only the types with an
    // `AtomicAdd` impl (32/64-bit and pointer-sized) are covered — i8/i16 have none.
    #[test]
    fn atomic_arithmetic_tests() {
        for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
            let x = Atomic::new(v);

            assert_eq!(v, x.fetch_add(12, Full));
            assert_eq!(v + 12, x.load(Relaxed));

            x.add(13, Relaxed);

            assert_eq!(v + 25, x.load(Relaxed));
        });
    }

    // `Atomic<bool>` (via the `i8`-backed impl) supports load/store/xchg/cmpxchg.
    #[test]
    fn atomic_bool_tests() {
        let x = Atomic::new(false);

        assert_eq!(false, x.load(Relaxed));
        x.store(true, Relaxed);
        assert_eq!(true, x.load(Relaxed));

        assert_eq!(true, x.xchg(false, Relaxed));
        assert_eq!(false, x.load(Relaxed));

        assert_eq!(Err(false), x.cmpxchg(true, true, Relaxed));
        assert_eq!(false, x.load(Relaxed));
        assert_eq!(Ok(false), x.cmpxchg(false, true, Full));
    }

    // `Atomic<*mut T>` and `Atomic<*const T>` (backed by `*const c_void`) support
    // load/cmpxchg with raw-pointer values.
    #[test]
    fn atomic_ptr_tests() {
        let mut v = 42;
        let mut u = 43;
        let x = Atomic::new(&raw mut v);

        assert_eq!(x.load(Acquire), &raw mut v);
        assert_eq!(x.cmpxchg(&raw mut u, &raw mut u, Relaxed), Err(&raw mut v));
        assert_eq!(x.cmpxchg(&raw mut v, &raw mut u, Relaxed), Ok(&raw mut v));
        assert_eq!(x.load(Relaxed), &raw mut u);

        let x = Atomic::new(&raw const v);

        assert_eq!(x.load(Acquire), &raw const v);
        assert_eq!(
            x.cmpxchg(&raw const u, &raw const u, Relaxed),
            Err(&raw const v)
        );
        assert_eq!(
            x.cmpxchg(&raw const v, &raw const u, Relaxed),
            Ok(&raw const v)
        );
        assert_eq!(x.load(Relaxed), &raw const u);
    }

    // `AtomicFlag` behaves like `Atomic<bool>` and also exposes `get_mut()` for non-atomic
    // mutation when exclusively borrowed.
    #[test]
    fn atomic_flag_tests() {
        let mut flag = AtomicFlag::new(false);

        assert_eq!(false, flag.load(Relaxed));

        *flag.get_mut() = true;
        assert_eq!(true, flag.load(Relaxed));

        assert_eq!(true, flag.xchg(false, Relaxed));
        assert_eq!(false, flag.load(Relaxed));

        *flag.get_mut() = true;
        assert_eq!(Ok(true), flag.cmpxchg(true, false, Full));
        assert_eq!(false, flag.load(Relaxed));
    }
}
338}