kernel/alloc.rs
// SPDX-License-Identifier: GPL-2.0

//! Implementation of the kernel's memory allocation infrastructure.

#[cfg(not(any(test, testlib)))]
pub mod allocator;
pub mod kbox;
pub mod kvec;
pub mod layout;

#[cfg(any(test, testlib))]
pub mod allocator_test;

#[cfg(any(test, testlib))]
pub use self::allocator_test as allocator;

pub use self::kbox::Box;
pub use self::kbox::KBox;
pub use self::kbox::KVBox;
pub use self::kbox::VBox;

pub use self::kvec::IntoIter;
pub use self::kvec::KVVec;
pub use self::kvec::KVec;
pub use self::kvec::VVec;
pub use self::kvec::Vec;

/// Indicates an allocation error.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;

use crate::error::{code::EINVAL, Result};
use core::{alloc::Layout, ptr::NonNull};

/// Flags to be used when allocating memory.
///
/// They can be combined with the operators `|`, `&`, and `!`.
///
/// Values can be used from the [`flags`] module.
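///
/// # Examples
///
/// A minimal illustrative sketch of combining flags with `|` and masking them with `&` and `!`
/// (it assumes `__GFP_ZERO` is not already part of `GFP_KERNEL`):
///
/// ```
/// use kernel::alloc::flags::{GFP_KERNEL, __GFP_ZERO};
///
/// // Request a zeroed allocation with the typical kernel-internal flags.
/// let flags = GFP_KERNEL | __GFP_ZERO;
/// assert!(flags.contains(__GFP_ZERO));
/// assert!(flags.contains(GFP_KERNEL));
///
/// // Clearing the `__GFP_ZERO` bit with `&` and `!` removes it again.
/// let cleared = flags & !__GFP_ZERO;
/// assert!(!cleared.contains(__GFP_ZERO));
/// ```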
#[derive(Clone, Copy, PartialEq)]
pub struct Flags(u32);

impl Flags {
    /// Get the raw representation of this flag.
    pub(crate) fn as_raw(self) -> u32 {
        self.0
    }

    /// Check whether `flags` is contained in `self`.
    pub fn contains(self, flags: Flags) -> bool {
        (self & flags) == flags
    }
}

impl core::ops::BitOr for Flags {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitAnd for Flags {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl core::ops::Not for Flags {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

/// Allocation flags.
///
/// These are meant to be used in functions that can allocate memory.
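///
/// # Examples
///
/// A minimal illustrative sketch; it assumes that [`KBox::new`] takes the value to box together
/// with the allocation flags:
///
/// ```
/// use kernel::alloc::{flags::GFP_KERNEL, KBox};
///
/// // Allocate a boxed integer using the typical flags for process context.
/// let value = KBox::new(42u32, GFP_KERNEL)?;
/// assert_eq!(*value, 42);
/// # Ok::<(), kernel::alloc::AllocError>(())
/// ```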
pub mod flags {
    use super::Flags;

    /// Zeroes out the allocated memory.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_ZERO: Flags = Flags(bindings::__GFP_ZERO);

    /// Allow the allocation to be in high memory.
    ///
    /// Allocations in high memory may not be mapped into the kernel's address space, so this
    /// can't be used with `kmalloc` and other similar methods.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_HIGHMEM: Flags = Flags(bindings::__GFP_HIGHMEM);

    /// Users cannot sleep and need the allocation to succeed.
    ///
    /// A lower watermark is applied to allow access to "atomic reserves". The current
    /// implementation doesn't support NMI and a few other strict non-preemptive contexts (e.g.
    /// `raw_spin_lock`). The same applies to [`GFP_NOWAIT`].
    pub const GFP_ATOMIC: Flags = Flags(bindings::GFP_ATOMIC);

    /// Typical for kernel-internal allocations. The caller requires `ZONE_NORMAL` or a lower
    /// zone for direct access but can enter direct reclaim.
    pub const GFP_KERNEL: Flags = Flags(bindings::GFP_KERNEL);

    /// The same as [`GFP_KERNEL`], except the allocation is accounted to kmemcg.
    pub const GFP_KERNEL_ACCOUNT: Flags = Flags(bindings::GFP_KERNEL_ACCOUNT);

    /// For kernel allocations that should not stall for direct reclaim, start physical IO or use
    /// any filesystem callback. It is very likely to fail to allocate memory, even for very
    /// small allocations.
    pub const GFP_NOWAIT: Flags = Flags(bindings::GFP_NOWAIT);

    /// Suppresses allocation failure reports.
    ///
    /// This is normally or'd with other flags.
    pub const __GFP_NOWARN: Flags = Flags(bindings::__GFP_NOWARN);
}

/// Non-Uniform Memory Access (NUMA) node identifier.
#[derive(Clone, Copy, PartialEq)]
pub struct NumaNode(i32);

impl NumaNode {
    /// Create a new NUMA node identifier (non-negative integer).
    ///
    /// Returns [`EINVAL`] if a negative id or an id greater than or equal to
    /// [`bindings::MAX_NUMNODES`] is specified.
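    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch; node `0` is assumed to be valid, which always holds since
    /// `MAX_NUMNODES` is at least 1:
    ///
    /// ```
    /// use kernel::alloc::NumaNode;
    ///
    /// // A small non-negative id is accepted, a negative id is rejected.
    /// assert!(NumaNode::new(0).is_ok());
    /// assert!(NumaNode::new(-1).is_err());
    /// ```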
    pub fn new(node: i32) -> Result<Self> {
        // MAX_NUMNODES never exceeds 2**10 because NODES_SHIFT is 0..10.
        if node < 0 || node >= bindings::MAX_NUMNODES as i32 {
            return Err(EINVAL);
        }
        Ok(Self(node))
    }
}

/// A constant to pass to an [`Allocator`] when the caller doesn't care which NUMA node the
/// memory is allocated from.
impl NumaNode {
    /// No node preference.
    pub const NO_NODE: NumaNode = NumaNode(bindings::NUMA_NO_NODE);
}

/// The kernel's [`Allocator`] trait.
///
/// An implementation of [`Allocator`] can allocate, re-allocate and free memory buffers described
/// via [`Layout`].
///
/// [`Allocator`] is designed to be implemented as a ZST; [`Allocator`] functions do not operate on
/// an object instance.
///
/// In order to be able to support `#[derive(CoercePointee)]` later on, we need to avoid a design
/// that requires an `Allocator` to be instantiated, hence its functions must not contain any kind
/// of `self` parameter.
///
/// # Safety
///
/// - A memory allocation returned from an allocator must remain valid until it is explicitly
///   freed.
///
/// - Any pointer to a valid memory allocation must be valid to be passed to any other
///   [`Allocator`] function of the same type.
///
/// - Implementers must ensure that all trait functions abide by the guarantees documented in the
///   `# Guarantees` sections.
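///
/// # Examples
///
/// A minimal illustrative sketch; it assumes the `Kmalloc` implementation from the `allocator`
/// module and a context in which `GFP_KERNEL` may be used:
///
/// ```
/// use kernel::alloc::{allocator::Kmalloc, flags, Allocator, NumaNode};
/// use core::alloc::Layout;
///
/// let layout = Layout::new::<u64>();
///
/// // Allocate a buffer that is large and aligned enough for a `u64`.
/// let ptr = Kmalloc::alloc(layout, flags::GFP_KERNEL, NumaNode::NO_NODE)?;
///
/// // SAFETY: `ptr` was allocated by `Kmalloc` with `layout` and is not used again afterwards.
/// unsafe { Kmalloc::free(ptr.cast(), layout) };
/// # Ok::<(), kernel::alloc::AllocError>(())
/// ```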
pub unsafe trait Allocator {
    /// The minimum alignment satisfied by all allocations from this allocator.
    ///
    /// # Guarantees
    ///
    /// Any pointer allocated by this allocator is guaranteed to be aligned to `MIN_ALIGN` even if
    /// the requested layout has a smaller alignment.
    const MIN_ALIGN: usize;

    /// Allocate memory based on `layout`, `flags` and `nid`.
    ///
    /// On success, returns a buffer represented as `NonNull<[u8]>` that satisfies the layout
    /// constraints (i.e. minimum size and alignment as specified by `layout`).
    ///
    /// This function is equivalent to `realloc` when called with `None`.
    ///
    /// # Guarantees
    ///
    /// When the return value is `Ok(ptr)`, then `ptr` is
    /// - valid for reads and writes for `layout.size()` bytes, until it is passed to
    ///   [`Allocator::free`] or [`Allocator::realloc`],
    /// - aligned to `layout.align()`,
    ///
    /// Additionally, `Flags` are honored as documented in
    /// <https://docs.kernel.org/core-api/mm-api.html#mm-api-gfp-flags>.
    fn alloc(layout: Layout, flags: Flags, nid: NumaNode) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: Passing `None` to `realloc` is valid by its safety requirements and asks for a
        // new memory allocation.
        unsafe { Self::realloc(None, layout, Layout::new::<()>(), flags, nid) }
    }

    /// Re-allocate an existing memory allocation to satisfy the requested `layout`, optionally on
    /// a specific NUMA node.
    ///
    /// Systems employing a Non-Uniform Memory Access (NUMA) architecture contain collections of
    /// hardware resources, including processors, memory, and I/O buses, that comprise what is
    /// commonly known as a NUMA node.
    ///
    /// `nid` stands for NUMA id, i.e. NUMA node identifier, which is a non-negative integer if a
    /// node needs to be specified, or [`NumaNode::NO_NODE`] if the caller doesn't care.
    ///
    /// If the requested size is zero, `realloc` behaves like `free`.
    ///
    /// If the requested size is larger than the size of the existing allocation, a successful
    /// call to `realloc` guarantees that the new or grown buffer has at least `layout.size()`
    /// bytes, but may also be larger.
    ///
    /// If the requested size is smaller than the size of the existing allocation, `realloc` may
    /// or may not shrink the buffer; this is implementation specific to the allocator.
    ///
    /// On allocation failure, the existing buffer, if any, remains valid.
    ///
    /// The buffer is represented as `NonNull<[u8]>`.
    ///
    /// # Safety
    ///
    /// - If `ptr == Some(p)`, then `p` must point to an existing and valid memory allocation
    ///   created by this [`Allocator`]; if `old_layout` is zero-sized, `p` does not need to be a
    ///   pointer returned by this [`Allocator`].
    /// - `ptr` is allowed to be `None`; in this case a new memory allocation is created and
    ///   `old_layout` is ignored.
    /// - `old_layout` must match the `Layout` the allocation has been created with.
    ///
    /// # Guarantees
    ///
    /// This function has the same guarantees as [`Allocator::alloc`]. When `ptr == Some(p)`, then
    /// it additionally guarantees that:
    /// - the contents of the memory pointed to by `p` are preserved up to the lesser of the new
    ///   and old size, i.e. `ret_ptr[0..min(layout.size(), old_layout.size())] ==
    ///   p[0..min(layout.size(), old_layout.size())]`.
    /// - when the return value is `Err(AllocError)`, then `ptr` is still valid.
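    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch of growing a buffer; it assumes the `Kmalloc` implementation
    /// from the `allocator` module:
    ///
    /// ```
    /// use kernel::alloc::{allocator::Kmalloc, flags, Allocator, NumaNode};
    /// use core::alloc::Layout;
    ///
    /// let old_layout = Layout::new::<[u8; 16]>();
    /// let new_layout = Layout::new::<[u8; 32]>();
    ///
    /// let ptr = Kmalloc::alloc(old_layout, flags::GFP_KERNEL, NumaNode::NO_NODE)?;
    ///
    /// // SAFETY: `ptr` was just allocated by `Kmalloc` with `old_layout`, which is passed as the
    /// // old layout below.
    /// let ptr = unsafe {
    ///     Kmalloc::realloc(
    ///         Some(ptr.cast()),
    ///         new_layout,
    ///         old_layout,
    ///         flags::GFP_KERNEL,
    ///         NumaNode::NO_NODE,
    ///     )?
    /// };
    ///
    /// // SAFETY: `ptr` now points at an allocation created by `Kmalloc` with `new_layout` and is
    /// // not used again afterwards.
    /// unsafe { Kmalloc::free(ptr.cast(), new_layout) };
    /// # Ok::<(), kernel::alloc::AllocError>(())
    /// ```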
    unsafe fn realloc(
        ptr: Option<NonNull<u8>>,
        layout: Layout,
        old_layout: Layout,
        flags: Flags,
        nid: NumaNode,
    ) -> Result<NonNull<[u8]>, AllocError>;

    /// Free an existing memory allocation.
    ///
    /// # Safety
    ///
    /// - `ptr` must point to an existing and valid memory allocation created by this
    ///   [`Allocator`]; if `layout` is zero-sized, `ptr` does not need to be a pointer returned
    ///   by this [`Allocator`].
    /// - `layout` must match the `Layout` the allocation has been created with.
    /// - The memory allocation at `ptr` must never again be read from or written to.
    unsafe fn free(ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: The caller guarantees that `ptr` points at a valid allocation created by this
        // allocator. We are passing a `Layout` with the smallest possible alignment, so it is
        // smaller than or equal to the alignment previously used with this allocation.
        let _ = unsafe {
            Self::realloc(
                Some(ptr),
                Layout::new::<()>(),
                layout,
                Flags(0),
                NumaNode::NO_NODE,
            )
        };
    }
}

/// Returns a properly aligned dangling pointer from the given `layout`.
pub(crate) fn dangling_from_layout(layout: Layout) -> NonNull<u8> {
    let ptr = layout.align() as *mut u8;

    // SAFETY: `layout.align()` (and hence `ptr`) is guaranteed to be non-zero.
    unsafe { NonNull::new_unchecked(ptr) }
}