path: root/include/linux/slab.h
author		Vlastimil Babka <vbabka@suse.cz>	2023-03-29 10:48:39 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2023-03-29 10:48:39 +0200
commit		ed4cdfbeb8735c36a2e31009866dfc2dfa26db3f (patch)
tree		714a066c2656d0cadfba415a2cf7ebb5e9b84afe /include/linux/slab.h
parent		8f0293bf7aeb9339f724e306e7a0a741f633c738 (diff)
parent		ae65a5211d90e54ae604012ce9cf234c48780929 (diff)
download	linux-ed4cdfbeb8735c36a2e31009866dfc2dfa26db3f.tar.gz
Merge branch 'slab/for-6.4/slob-removal' into slab/for-next
A series by myself to remove CONFIG_SLOB:

The SLOB allocator was deprecated in 6.2 and there have been no complaints so far, so let's proceed with the removal. Besides the code cleanup, the main immediate benefit will be allowing the kfree() family of functions to work on kmem_cache_alloc() objects, which was incompatible with SLOB. This includes kfree_rcu(), which had no kmem_cache_free_rcu() counterpart yet; one shouldn't be necessary anymore. Otherwise it's all straightforward removal.

After this series, 'git grep slob' or 'git grep SLOB' will have 3 remaining relevant hits in non-mm code:

- tomoyo - patch submitted and carried there, doesn't need to wait for this series
- skbuff - a patch to clean up the now-unnecessary #ifdefs will be posted to netdev after this is merged, as requested, to avoid conflicts
- ftrace ring_buffer - a patch to remove an obsolete comment is carried there

The rest of the 'git grep SLOB' hits are false positives, or intentional (CREDITS, and the mm/Kconfig SLUB_TINY description, to help those who happen to migrate later).
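To illustrate the benefit named above, here is a minimal sketch (not from the patch itself; struct my_obj, my_cache and the example function are hypothetical names, and my_cache is assumed to have been created elsewhere with kmem_cache_create()) of the pattern that becomes universally valid once SLOB is gone:

#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rcu;
};

/* Assumed to be set up elsewhere via kmem_cache_create(). */
static struct kmem_cache *my_cache;

static void example_free_paths(void)
{
	struct my_obj *a = kmem_cache_alloc(my_cache, GFP_KERNEL);
	struct my_obj *b = kmem_cache_alloc(my_cache, GFP_KERNEL);

	/* With SLOB removed, kfree() may free cache-allocated objects... */
	if (a)
		kfree(a);
	/* ...and so may kfree_rcu(), which is why no separate
	 * kmem_cache_free_rcu() is needed anymore. */
	if (b)
		kfree_rcu(b, rcu);
}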
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	39
1 file changed, 0 insertions(+), 39 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 87d687c43d8c4..7db48f9f0d9d8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
#endif
#endif
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 3
-#endif
-#endif
-
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
NR_KMALLOC_TYPES
};
-#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
-#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
static __always_inline void kfree_bulk(size_t size, void **p)
{
kmem_cache_free_bulk(NULL, size, p);
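For reference, a minimal usage sketch of the kfree_bulk() wrapper shown above, now that the SLOB caveat in the deleted comment no longer applies (the function name, array size and allocation size are illustrative, not from the patch):

#include <linux/kernel.h>
#include <linux/slab.h>

static void bulk_free_example(void)
{
	void *objs[4];
	size_t i;

	for (i = 0; i < ARRAY_SIZE(objs); i++) {
		objs[i] = kmalloc(32, GFP_KERNEL);
		if (!objs[i]) {
			/* Free the objects allocated so far and bail out. */
			kfree_bulk(i, objs);
			return;
		}
	}

	/* One call frees all four objects; internally this is
	 * kmem_cache_free_bulk(NULL, size, p) as defined above. */
	kfree_bulk(ARRAY_SIZE(objs), objs);
}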
@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
* Try really hard to succeed the allocation but fail
* eventually.
*/
-#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && size) {
@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
}
return __kmalloc(size, flags);
}
-#else
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-
- return __kmalloc(size, flags);
-}
-#endif
-#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && size) {
@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
}
return __kmalloc_node(size, flags, node);
}
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_node(size, flags, node);
-
- return __kmalloc_node(size, flags, node);
-}
-#endif
/**
* kmalloc_array - allocate memory for an array.
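As a closing note on the kmalloc()/kmalloc_node() hunks above: with the SLOB variants deleted, the single remaining definition folds constant sizes at compile time. A minimal sketch of the two resulting paths, assuming a typical SLUB configuration where KMALLOC_MAX_CACHE_SIZE is a couple of pages (the sizes below are illustrative):

#include <linux/slab.h>

static void constant_size_example(void)
{
	/* Constant size within KMALLOC_MAX_CACHE_SIZE: kmalloc_index() is
	 * evaluated at compile time and the call resolves directly to the
	 * matching kmalloc cache. */
	void *small = kmalloc(64, GFP_KERNEL);

	/* Constant size above KMALLOC_MAX_CACHE_SIZE: routed to
	 * kmalloc_large(), i.e. the page allocator. */
	void *big = kmalloc(64 * 1024, GFP_KERNEL);

	/* kfree(NULL) is a no-op, so no NULL checks are needed here. */
	kfree(small);
	kfree(big);
}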