// kernel/sync/atomic.rs

// SPDX-License-Identifier: GPL-2.0

//! Atomic primitives.
//!
//! These primitives have the same semantics as their C counterparts, and the precise definitions
//! of the semantics can be found at [`LKMM`]. Note that the Linux Kernel Memory (Consistency)
//! Model is the only memory model for Rust code in the kernel, and Rust's own atomics should be
//! avoided.
//!
//! # Data races
//!
//! [`LKMM`] atomics have different rules regarding data races:
//!
//! - A normal write from C side is treated as an atomic write if
//!   CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
//! - Mixed-size atomic accesses don't cause data races.
//!
//! [`LKMM`]: srctree/tools/memory-model/
// Implementation internals and the ordering marker types used by the atomic API.
mod internal;
pub mod ordering;
mod predefine;

pub use internal::AtomicImpl;
pub use ordering::{Acquire, Full, Relaxed, Release};

pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps};

use crate::build_error;
use internal::AtomicRepr;
use ordering::OrderingType;
31
/// A memory location which can be safely modified from multiple execution contexts.
///
/// This has the same size, alignment and bit validity as the underlying type `T`. And it disables
/// niche optimization for the same reason as [`UnsafeCell`].
///
/// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
/// Memory (Consistency) Model][LKMM], hence they should be modeled as the corresponding
/// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
/// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
/// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
/// `smp_load_acquire()` and `smp_store_release()`).
///
/// # Invariants
///
/// `self.0` is a valid `T`.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [LKMM]: srctree/tools/memory-model/
/// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
#[repr(transparent)]
pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);
53
// SAFETY: `Atomic<T>` is safe to transfer between execution contexts because of the safety
// requirement of `AtomicType` (a `T` must be safe to transfer between execution contexts).
unsafe impl<T: AtomicType> Send for Atomic<T> {}
57
// SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic,
// hence no data races.
unsafe impl<T: AtomicType> Sync for Atomic<T> {}
60
/// Types that support basic atomic operations.
///
/// # Round-trip transmutability
///
/// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
///
/// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
/// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
///   yields a value that is in all aspects equivalent to the original value.
///
/// # Safety
///
/// - [`Self`] must have the same size and alignment as [`Self::Repr`].
/// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
/// - [`Self`] must be safe to transfer between execution contexts. If [`Self`] is [`Send`], this
///   is automatically satisfied. The exception is pointer types (e.g. raw pointers and
///   [`NonNull<T>`]) that are marked `!Send` but require `unsafe` to do anything meaningful with
///   them: transferring pointer values between execution contexts is safe as long as the actual
///   `unsafe` dereferencing is justified.
///
/// Note that this is more relaxed than requiring the bi-directional transmutability (i.e.
/// [`transmute()`] is always sound between `U` and `T`) because of the support for atomic
/// variables over unit-only enums, see [Examples].
///
/// # Limitations
///
/// Because C primitives are used to implement the atomic operations, and a C function requires a
/// valid object of a type to operate on (i.e. no `MaybeUninit<_>`), hence at the Rust <-> C
/// surface, only types with all the bits initialized can be passed. As a result, types like `(u8,
/// u16)` (padding bytes are uninitialized) are currently not supported.
///
/// # Examples
///
/// A unit-only enum that implements [`AtomicType`]:
///
/// ```
/// use kernel::sync::atomic::{AtomicType, Atomic, Relaxed};
///
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// #[repr(i32)]
/// enum State {
///     Uninit = 0,
///     Working = 1,
///     Done = 2,
/// }
///
/// // SAFETY: `State` and `i32` has the same size and alignment, and it's round-trip
/// // transmutable to `i32`.
/// unsafe impl AtomicType for State {
///     type Repr = i32;
/// }
///
/// let s = Atomic::new(State::Uninit);
///
/// assert_eq!(State::Uninit, s.load(Relaxed));
/// ```
///
/// [`transmute()`]: core::mem::transmute
/// [round-trip transmutable]: AtomicType#round-trip-transmutability
/// [Examples]: AtomicType#examples
/// [`NonNull<T>`]: core::ptr::NonNull
pub unsafe trait AtomicType: Sized + Copy {
    /// The backing atomic implementation type.
    type Repr: AtomicImpl;
}
125
/// Types that support atomic add operations.
///
/// # Safety
///
// TODO: Properly define `wrapping_add` in the following comment.
/// `wrapping_add`-ing any value of type `Self::Repr::Delta` obtained by [`Self::rhs_into_delta()`]
/// to any value of type `Self::Repr` obtained through transmuting a value of type `Self` must
/// yield a value with a bit pattern that is also valid for `Self`.
pub unsafe trait AtomicAdd<Rhs = Self>: AtomicType {
    /// Converts `Rhs` into the `Delta` type of the atomic implementation.
    fn rhs_into_delta(rhs: Rhs) -> <Self::Repr as AtomicImpl>::Delta;
}
138
/// Converts a `T` into its atomic representation `T::Repr` by bit-reinterpretation.
#[inline(always)]
const fn into_repr<T: AtomicType>(v: T) -> T::Repr {
    // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to
    // `T::Repr`, therefore the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&v) }
}
145
/// Converts an atomic representation `T::Repr` back into a `T` by bit-reinterpretation.
///
/// # Safety
///
/// `r` must be a valid bit pattern of `T`.
#[inline(always)]
const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T {
    // SAFETY: Per the safety requirement of the function, the transmute operation is sound.
    unsafe { core::mem::transmute_copy(&r) }
}
154
impl<T: AtomicType> Atomic<T> {
    /// Creates a new atomic `T`.
    pub const fn new(v: T) -> Self {
        // INVARIANT: Per the safety requirement of `AtomicType`, `into_repr(v)` is a valid `T`.
        Self(AtomicRepr::new(into_repr(v)))
    }

    /// Creates a reference to an atomic `T` from a pointer of `T`.
    ///
    /// This usually is used when communicating with C side or manipulating a C struct, see
    /// examples below.
    ///
    /// # Safety
    ///
    /// - `ptr` is aligned to `align_of::<T>()`.
    /// - `ptr` is valid for reads and writes for `'a`.
    /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
    ///   by [`LKMM`]) against atomic operations on the returned reference. Note that if all other
    ///   accesses are atomic, then this safety requirement is trivially fulfilled.
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    ///
    /// # Examples
    ///
    /// Using [`Atomic::from_ptr()`] combined with [`Atomic::load()`] or [`Atomic::store()`] can
    /// achieve the same functionality as `READ_ONCE()`/`smp_load_acquire()` or
    /// `WRITE_ONCE()`/`smp_store_release()` in C side:
    ///
    /// ```
    /// # use kernel::types::Opaque;
    /// use kernel::sync::atomic::{Atomic, Relaxed, Release};
    ///
    /// // Assume there is a C struct `foo`.
    /// mod cbindings {
    ///     #[repr(C)]
    ///     pub(crate) struct foo {
    ///         pub(crate) a: i32,
    ///         pub(crate) b: i32
    ///     }
    /// }
    ///
    /// let tmp = Opaque::new(cbindings::foo { a: 1, b: 2 });
    ///
    /// // struct foo *foo_ptr = ..;
    /// let foo_ptr = tmp.get();
    ///
    /// // SAFETY: `foo_ptr` is valid, and `.a` is in bounds.
    /// let foo_a_ptr = unsafe { &raw mut (*foo_ptr).a };
    ///
    /// // a = READ_ONCE(foo_ptr->a);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for reads, and all other accesses on it are atomic, so no
    /// // data race.
    /// let a = unsafe { Atomic::from_ptr(foo_a_ptr) }.load(Relaxed);
    /// # assert_eq!(a, 1);
    ///
    /// // smp_store_release(&foo_ptr->a, 2);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for writes, and all other accesses on it are atomic, so
    /// // no data race.
    /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release);
    /// ```
    pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self {
        // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity.
        // SAFETY: Per function safety requirement, `ptr` is a valid pointer and the object will
        // live long enough. It's safe to return a `&Atomic<T>` because function safety requirement
        // guarantees other accesses won't cause data races.
        unsafe { &*ptr.cast::<Self>() }
    }

    /// Returns a pointer to the underlying atomic `T`.
    ///
    /// Note that use of the returned pointer must not cause data races defined by [`LKMM`].
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    /// [`align_of::<T>()`]: core::mem::align_of
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: Per the function guarantee of `AtomicRepr::as_ptr()`, the `self.0.as_ptr()`
        // must be a valid and properly aligned pointer for `T::Repr`, and per the safety guarantee
        // of `AtomicType`, it's a valid and properly aligned pointer of `T`.
        self.0.as_ptr().cast()
    }

    /// Returns a mutable reference to the underlying atomic `T`.
    ///
    /// This is safe because the mutable reference of the atomic `T` guarantees exclusive access.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let mut atomic_val = Atomic::new(0u32);
    /// let val_mut = atomic_val.get_mut();
    /// *val_mut = 101;
    /// assert_eq!(101, atomic_val.load(Relaxed));
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        // CAST: `T` and `T::Repr` has the same size and alignment per the safety requirement of
        // `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the casting
        // result is a valid pointer of `T`.
        // SAFETY: The pointer is valid per the CAST comment above, and the mutable reference
        // guarantees exclusive access.
        unsafe { &mut *self.0.as_ptr().cast() }
    }
}
265
impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    /// Loads the value from the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42i64);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_read", "atomic64_read"))]
    #[inline(always)]
    pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
        let v = {
            match Ordering::TYPE {
                OrderingType::Relaxed => T::Repr::atomic_read(&self.0),
                OrderingType::Acquire => T::Repr::atomic_read_acquire(&self.0),
                // The `AcquireOrRelaxed` bound rules out other orderings at the type level, so
                // this arm is unreachable; `build_error!` turns any regression into a build-time
                // failure.
                _ => build_error!("Wrong ordering"),
            }
        };

        // SAFETY: `v` comes from reading `self.0`, which is a valid `T` per the type invariants.
        unsafe { from_repr(v) }
    }

    /// Stores a value to the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.store(43, Relaxed);
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_set", "atomic64_set"))]
    #[inline(always)]
    pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
        let v = into_repr(v);

        // INVARIANT: `v` is a valid `T`, and is stored to `self.0` by `atomic_set*()`.
        match Ordering::TYPE {
            OrderingType::Relaxed => T::Repr::atomic_set(&self.0, v),
            OrderingType::Release => T::Repr::atomic_set_release(&self.0, v),
            // Unreachable: `ReleaseOrRelaxed` rules out other orderings at the type level.
            _ => build_error!("Wrong ordering"),
        }
    }
}
328
329impl<T: AtomicType + core::fmt::Debug> core::fmt::Debug for Atomic<T>
330where
331    T::Repr: AtomicBasicOps,
332{
333    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
334        core::fmt::Debug::fmt(&self.load(Relaxed), f)
335    }
336}
337
impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicExchangeOps,
{
    /// Atomic exchange.
    ///
    /// Atomically updates `*self` to `v` and returns the old value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.xchg(52, Acquire));
    /// assert_eq!(52, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_xchg", "atomic64_xchg", "swap"))]
    #[inline(always)]
    pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {
        let v = into_repr(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_xchg*()` because `v` is transmutable to
        // `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_xchg(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_xchg_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_xchg_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_xchg_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `*self`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }

    /// Atomic compare and exchange.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified.
    ///
    /// Compare: The comparison is done via the byte level comparison between `*self` and `old`.
    ///
    /// Ordering: When it succeeds, it provides the corresponding ordering as the `Ordering` type
    /// parameter indicates, while a failed one doesn't provide any ordering; the load part of a
    /// failed cmpxchg is a [`Relaxed`] load.
    ///
    /// Returns `Ok(value)` if cmpxchg succeeds, and `value` is guaranteed to be equal to `old`,
    /// otherwise returns `Err(value)`, and `value` is the current value of `*self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// // Checks whether cmpxchg succeeded.
    /// let success = x.cmpxchg(52, 64, Relaxed).is_ok();
    /// # assert!(!success);
    ///
    /// // Checks whether cmpxchg failed.
    /// let failure = x.cmpxchg(52, 64, Relaxed).is_err();
    /// # assert!(failure);
    ///
    /// // Uses the old value if failed, probably re-try cmpxchg.
    /// match x.cmpxchg(52, 64, Relaxed) {
    ///     Ok(_) => { },
    ///     Err(old) => {
    ///         // do something with `old`.
    ///         # assert_eq!(old, 42);
    ///     }
    /// }
    ///
    /// // Uses the latest value regardless, same as atomic_cmpxchg() in C.
    /// let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
    /// # assert_eq!(42, latest);
    /// assert_eq!(64, x.load(Relaxed));
    /// ```
    ///
    /// [`Relaxed`]: ordering::Relaxed
    #[doc(alias(
        "atomic_cmpxchg",
        "atomic64_cmpxchg",
        "atomic_try_cmpxchg",
        "atomic64_try_cmpxchg",
        "compare_exchange"
    ))]
    #[inline(always)]
    pub fn cmpxchg<Ordering: ordering::Ordering>(
        &self,
        mut old: T,
        new: T,
        o: Ordering,
    ) -> Result<T, T> {
        // Note on code generation:
        //
        // try_cmpxchg() is used to implement cmpxchg(), and if the helper functions are inlined,
        // the compiler is able to figure out that branch is not needed if the users don't care
        // about whether the operation succeeds or not. One exception is on x86, due to commit
        // 44fe84459faf ("locking/atomic: Fix atomic_try_cmpxchg() semantics"), the
        // atomic_try_cmpxchg() on x86 has a branch even if the caller doesn't care about the
        // success of cmpxchg and only wants to use the old value. For example, for code like:
        //
        //     let latest = x.cmpxchg(42, 64, Full).unwrap_or_else(|old| old);
        //
        // It will still generate code:
        //
        //     movl    $0x40, %ecx
        //     movl    $0x34, %eax
        //     lock
        //     cmpxchgl        %ecx, 0x4(%rsp)
        //     jne     1f
        //     2:
        //     ...
        //     1:  movl    %eax, %ecx
        //     jmp 2b
        //
        // This might be "fixed" by introducing a try_cmpxchg_exclusive() that knows the "*old"
        // location in the C function is always safe to write.
        if self.try_cmpxchg(&mut old, new, o) {
            Ok(old)
        } else {
            Err(old)
        }
    }

    /// Atomic compare and exchange and returns whether the operation succeeds.
    ///
    /// If `*self` == `old`, atomically updates `*self` to `new`. Otherwise, `*self` is not
    /// modified, `*old` is updated to the current value of `*self`.
    ///
    /// "Compare" and "Ordering" part are the same as [`Atomic::cmpxchg()`].
    ///
    /// Returns `true` if the cmpxchg succeeds, otherwise returns `false`.
    #[inline(always)]
    fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {
        let mut tmp = into_repr(*old);
        let new = into_repr(new);

        // INVARIANT: `self.0` is a valid `T` after `atomic_try_cmpxchg*()` because `new` is
        // transmutable to `T`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_try_cmpxchg(&self.0, &mut tmp, new),
                OrderingType::Acquire => {
                    T::Repr::atomic_try_cmpxchg_acquire(&self.0, &mut tmp, new)
                }
                OrderingType::Release => {
                    T::Repr::atomic_try_cmpxchg_release(&self.0, &mut tmp, new)
                }
                OrderingType::Relaxed => {
                    T::Repr::atomic_try_cmpxchg_relaxed(&self.0, &mut tmp, new)
                }
            }
        };

        // SAFETY: `tmp` comes from reading `*self`, which is a valid `T` per type invariants.
        *old = unsafe { from_repr(tmp) };

        ret
    }
}
503
impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicArithmeticOps,
{
    /// Atomic add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.add(12, Relaxed);
    ///
    /// assert_eq!(54, x.load(Relaxed));
    /// ```
    #[inline(always)]
    pub fn add<Rhs>(&self, v: Rhs, _: ordering::Relaxed)
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_add()` due to safety requirement of
        // `AtomicAdd`.
        T::Repr::atomic_add(&self.0, v);
    }

    /// Atomic fetch and add.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_add(v)`, and returns the value of `*self`
    /// before the update.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    /// assert_eq!(42, x.load(Relaxed));
    /// assert_eq!(42, x.fetch_add(12, Acquire));
    /// assert_eq!(54, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42);
    /// assert_eq!(42, x.load(Relaxed));
    /// assert_eq!(42, x.fetch_add(12, Full));
    /// assert_eq!(54, x.load(Relaxed));
    /// ```
    #[inline(always)]
    pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
    where
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_add*()` due to safety requirement
        // of `AtomicAdd`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_fetch_add(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_fetch_add_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_fetch_add_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_fetch_add_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }

    /// Atomic fetch and subtract.
    ///
    /// Atomically updates `*self` to `(*self).wrapping_sub(v)`, and returns the value of `*self`
    /// before the update.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed};
    ///
    /// let x = Atomic::new(42);
    /// assert_eq!(42, x.load(Relaxed));
    /// assert_eq!(42, x.fetch_sub(12, Acquire));
    /// assert_eq!(30, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42);
    /// assert_eq!(42, x.load(Relaxed));
    /// assert_eq!(42, x.fetch_sub(12, Full));
    /// assert_eq!(30, x.load(Relaxed));
    /// ```
    #[inline(always)]
    pub fn fetch_sub<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T
    where
        // Types that support addition also support subtraction.
        T: AtomicAdd<Rhs>,
    {
        let v = T::rhs_into_delta(v);

        // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_sub*()` due to safety requirement
        // of `AtomicAdd`.
        let ret = {
            match Ordering::TYPE {
                OrderingType::Full => T::Repr::atomic_fetch_sub(&self.0, v),
                OrderingType::Acquire => T::Repr::atomic_fetch_sub_acquire(&self.0, v),
                OrderingType::Release => T::Repr::atomic_fetch_sub_release(&self.0, v),
                OrderingType::Relaxed => T::Repr::atomic_fetch_sub_relaxed(&self.0, v),
            }
        };

        // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants.
        unsafe { from_repr(ret) }
    }
}
622
/// Internal representation of [`AtomicFlag`].
///
/// On these architectures the flag is backed by a byte-sized atomic representation (the
/// `AtomicType` impl below selects `i8`), so the `bool` field needs no padding.
#[cfg(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64))]
#[repr(C)]
#[derive(Clone, Copy)]
struct Flag {
    bool_field: bool,
}
629
/// Internal representation of [`AtomicFlag`] on architectures where a 4-byte atomic
/// representation is used (the `AtomicType` impl below selects `i32`).
///
/// The padding is placed endian-dependently so that `bool_field` occupies the least significant
/// byte of the 4-byte value on both endiannesses.
///
/// # Invariants
///
/// `padding` must be all zeroes.
#[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))]
#[repr(C, align(4))]
#[derive(Clone, Copy)]
struct Flag {
    #[cfg(target_endian = "big")]
    padding: [u8; 3],
    bool_field: bool,
    #[cfg(target_endian = "little")]
    padding: [u8; 3],
}
643
impl Flag {
    /// Creates a `Flag` from a `bool`, zero-filling the padding where it exists.
    #[inline(always)]
    const fn new(b: bool) -> Self {
        // INVARIANT: `padding` is all zeroes.
        Self {
            bool_field: b,
            #[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))]
            padding: [0; 3],
        }
    }
}
655
// SAFETY: `Flag` and `Repr` have the same size and alignment, and `Flag` is round-trip
// transmutable to the selected representation (`i8` or `i32`).
unsafe impl AtomicType for Flag {
    #[cfg(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64))]
    type Repr = i8;
    #[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))]
    type Repr = i32;
}
664
/// An atomic flag type intended to be backed by performance-optimal integer type.
///
/// The backing integer type is an implementation detail; it may vary by architecture and change
/// in the future.
///
/// [`AtomicFlag`] is generally preferable to [`Atomic<bool>`] when you need read-modify-write
/// (RMW) operations (e.g. [`Atomic::xchg()`]/[`Atomic::cmpxchg()`]) or when [`Atomic<bool>`] does
/// not save memory due to padding. On some architectures that do not support byte-sized atomic
/// RMW operations, RMW operations on [`Atomic<bool>`] are slower.
///
/// If you only use [`Atomic::load()`]/[`Atomic::store()`], [`Atomic<bool>`] is fine.
///
/// # Examples
///
/// ```
/// use kernel::sync::atomic::{AtomicFlag, Relaxed};
///
/// let flag = AtomicFlag::new(false);
/// assert_eq!(false, flag.load(Relaxed));
/// flag.store(true, Relaxed);
/// assert_eq!(true, flag.load(Relaxed));
/// ```
pub struct AtomicFlag(Atomic<Flag>);
688
689impl AtomicFlag {
690    /// Creates a new atomic flag.
691    #[inline(always)]
692    pub const fn new(b: bool) -> Self {
693        Self(Atomic::new(Flag::new(b)))
694    }
695
696    /// Returns a mutable reference to the underlying flag as a [`bool`].
697    ///
698    /// This is safe because the mutable reference of the atomic flag guarantees exclusive access.
699    ///
700    /// # Examples
701    ///
702    /// ```
703    /// use kernel::sync::atomic::{AtomicFlag, Relaxed};
704    ///
705    /// let mut atomic_flag = AtomicFlag::new(false);
706    /// assert_eq!(false, atomic_flag.load(Relaxed));
707    /// *atomic_flag.get_mut() = true;
708    /// assert_eq!(true, atomic_flag.load(Relaxed));
709    /// ```
710    #[inline(always)]
711    pub fn get_mut(&mut self) -> &mut bool {
712        &mut self.0.get_mut().bool_field
713    }
714
715    /// Loads the value from the atomic flag.
716    #[inline(always)]
717    pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, o: Ordering) -> bool {
718        self.0.load(o).bool_field
719    }
720
721    /// Stores a value to the atomic flag.
722    #[inline(always)]
723    pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: bool, o: Ordering) {
724        self.0.store(Flag::new(v), o);
725    }
726
727    /// Stores a value to the atomic flag and returns the previous value.
728    #[inline(always)]
729    pub fn xchg<Ordering: ordering::Ordering>(&self, new: bool, o: Ordering) -> bool {
730        self.0.xchg(Flag::new(new), o).bool_field
731    }
732
733    /// Store a value to the atomic flag if the current value is equal to `old`.
734    #[inline(always)]
735    pub fn cmpxchg<Ordering: ordering::Ordering>(
736        &self,
737        old: bool,
738        new: bool,
739        o: Ordering,
740    ) -> Result<bool, bool> {
741        match self.0.cmpxchg(Flag::new(old), Flag::new(new), o) {
742            Ok(_) => Ok(old),
743            Err(f) => Err(f.bool_field),
744        }
745    }
746}
747
/// Atomic load over raw pointers.
///
/// This function provides a short-cut of `Atomic::from_ptr().load(..)`, and can be used to work
/// with C side on synchronizations:
///
/// - `atomic_load(.., Relaxed)` maps to `READ_ONCE()` when used for inter-thread communication.
/// - `atomic_load(.., Acquire)` maps to `smp_load_acquire()`.
///
/// # Safety
///
/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`.
/// - If there is a concurrent store from kernel (C or Rust), it has to be atomic.
#[doc(alias("READ_ONCE", "smp_load_acquire"))]
#[inline(always)]
pub unsafe fn atomic_load<T: AtomicType, Ordering: ordering::AcquireOrRelaxed>(
    ptr: *mut T,
    o: Ordering,
) -> T
where
    T::Repr: AtomicBasicOps,
{
    // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to
    // `align_of::<T>()`, and all concurrent stores from kernel are atomic, hence no data race per
    // LKMM.
    unsafe { Atomic::from_ptr(ptr) }.load(o)
}
774
/// Atomic store over raw pointers.
///
/// This function provides a short-cut of `Atomic::from_ptr().store(..)`, and can be used to work
/// with C side on synchronizations:
///
/// - `atomic_store(.., Relaxed)` maps to `WRITE_ONCE()` when used for inter-thread communication.
/// - `atomic_store(.., Release)` maps to `smp_store_release()`.
///
/// # Safety
///
/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`.
/// - If there is a concurrent access from kernel (C or Rust), it has to be atomic.
#[doc(alias("WRITE_ONCE", "smp_store_release"))]
#[inline(always)]
pub unsafe fn atomic_store<T: AtomicType, Ordering: ordering::ReleaseOrRelaxed>(
    ptr: *mut T,
    v: T,
    o: Ordering,
) where
    T::Repr: AtomicBasicOps,
{
    // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to
    // `align_of::<T>()`, and all concurrent accesses from kernel are atomic, hence no data race
    // per LKMM.
    unsafe { Atomic::from_ptr(ptr) }.store(v, o);
}
801
/// Atomic exchange over raw pointers.
///
/// This function provides a short-cut of `Atomic::from_ptr().xchg(..)`, and can be used to work
/// with C side on synchronizations.
///
/// # Safety
///
/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`.
/// - If there is a concurrent access from kernel (C or Rust), it has to be atomic.
#[inline(always)]
pub unsafe fn xchg<T: AtomicType, Ordering: ordering::Ordering>(
    ptr: *mut T,
    new: T,
    o: Ordering,
) -> T
where
    T::Repr: AtomicExchangeOps,
{
    // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to
    // `align_of::<T>()`, and all concurrent accesses from kernel are atomic, hence no data race
    // per LKMM.
    unsafe { Atomic::from_ptr(ptr) }.xchg(new, o)
}
825
/// Atomic compare and exchange over raw pointers.
///
/// This function provides a short-cut of `Atomic::from_ptr().cmpxchg(..)`, and can be used to work
/// with C side on synchronizations.
///
/// # Safety
///
/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`.
/// - If there is a concurrent access from kernel (C or Rust), it has to be atomic.
#[doc(alias("try_cmpxchg"))]
#[inline(always)]
pub unsafe fn cmpxchg<T: AtomicType, Ordering: ordering::Ordering>(
    ptr: *mut T,
    old: T,
    new: T,
    o: Ordering,
) -> Result<T, T>
where
    T::Repr: AtomicExchangeOps,
{
    // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to
    // `align_of::<T>()`, and all concurrent accesses from kernel are atomic, hence no data race
    // per LKMM.
    unsafe { Atomic::from_ptr(ptr) }.cmpxchg(old, new, o)
}