author		Andrey Konovalov <andreyknvl@google.com>	2022-03-24 18:10:31 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 19:06:46 -0700
commit		b42090ae6f3aa07b0a39403545d688489548a6a8 (patch)
tree		f1404502b91d3df67ade0a4a83a5a7074a0d5ded
parent		b8491b9052fef036aac0ca3afc18ef223aef6f61 (diff)
kasan, page_alloc: merge kasan_alloc_pages into post_alloc_hook
Currently, the code responsible for initializing and poisoning memory in post_alloc_hook() is scattered across two locations: the kasan_alloc_pages() hook for HW_TAGS KASAN and post_alloc_hook() itself. This is confusing.

This and a few following patches combine the code from these two locations. Along the way, these patches restructure the many checks that are performed, step by step, to make them easier to follow.

Replace the only caller of kasan_alloc_pages() with its implementation. As kasan_has_integrated_init() is only true when CONFIG_KASAN_HW_TAGS is enabled, moving the code results in no functional changes.

Also move the init and init_tags variable definitions out of the kasan_has_integrated_init() clause in post_alloc_hook(), as they have the same values regardless of what the if condition evaluates to.

This patch is not useful by itself but makes the simplifications in the following patches easier to follow.

Link: https://lkml.kernel.org/r/5ac7e0b30f5cbb177ec363ddd7878a3141289592.1643047180.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Acked-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/kasan.h	9
-rw-r--r--	mm/kasan/common.c	2
-rw-r--r--	mm/kasan/hw_tags.c	22
-rw-r--r--	mm/page_alloc.c	20
4 files changed, 16 insertions(+), 37 deletions(-)
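Before the diff proper: the following is a minimal, self-contained C sketch of the control flow this patch inlines into post_alloc_hook(). Every helper in it is a stub invented for illustration (the skip_kasan_poison field stands in for SetPageSkipKASanPoison(), the flag values are not the kernel's real gfp bits, and main() exists only to drive the sketch); it is not kernel code and only mirrors the branch structure visible in the hunks below.

/*
 * Standalone sketch (not kernel code): models the merged logic of
 * post_alloc_hook() after this patch. All helpers are stubs.
 */
#include <stdbool.h>
#include <stdio.h>

#define __GFP_ZEROTAGS		(1u << 0)	/* stand-in flag bits, not the */
#define __GFP_SKIP_KASAN_POISON	(1u << 1)	/* kernel's real gfp values    */

typedef unsigned int gfp_t;
struct page { int skip_kasan_poison; };

/* Stubs for the kernel helpers referenced by the patch. */
static bool want_init_on_free(void) { return false; }
static bool want_init_on_alloc(gfp_t flags) { (void)flags; return true; }
static bool kasan_has_integrated_init(void) { return true; } /* HW_TAGS on */

static void tag_clear_highpage(struct page *page)
{
	(void)page;
	puts("tag_clear_highpage: zero page and set memory tags");
}

static void kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	(void)page;
	printf("kasan_unpoison_pages: order=%u init=%d\n", order, (int)init);
}

static void post_alloc_hook_sketch(struct page *page, unsigned int order,
				   gfp_t gfp_flags)
{
	/* Computed once, up front: this patch hoists both definitions. */
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);

	if (kasan_has_integrated_init()) {
		/* Former body of kasan_alloc_pages(), now inlined. */
		if (gfp_flags & __GFP_SKIP_KASAN_POISON)
			page->skip_kasan_poison = 1;

		if (init_tags) {
			int i;

			/* Zero and retag each of the 1 << order subpages. */
			for (i = 0; i != 1 << order; ++i)
				tag_clear_highpage(page + i);
		} else {
			kasan_unpoison_pages(page, order, init);
		}
	} else {
		kasan_unpoison_pages(page, order, init);
		/* ...the non-integrated path continues as in the hunk below. */
	}
}

int main(void)
{
	struct page pages[4] = { { 0 } };

	/* order 2 => 4 pages; __GFP_ZEROTAGS selects the tagging branch. */
	post_alloc_hook_sketch(pages, 2, __GFP_ZEROTAGS);
	return 0;
}

With order = 2 and __GFP_ZEROTAGS set, the sketch takes the init_tags branch and retags all four subpages, which corresponds to the HW_TAGS path in the diff that follows.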
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index dd2161af84e9fa..4ed94616c8f077 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -84,17 +84,8 @@ static inline void kasan_disable_current(void) {}
 
 #ifdef CONFIG_KASAN_HW_TAGS
 
-void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
-
 #else /* CONFIG_KASAN_HW_TAGS */
 
-static __always_inline void kasan_alloc_pages(struct page *page,
-					      unsigned int order, gfp_t flags)
-{
-	/* Only available for integrated init. */
-	BUILD_BUG();
-}
-
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_has_integrated_init(void)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index a0082fad48b12e..d9079ec11f3131 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -538,7 +538,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 		return NULL;
 
 	/*
-	 * The object has already been unpoisoned by kasan_alloc_pages() for
+	 * The object has already been unpoisoned by kasan_unpoison_pages() for
 	 * alloc_pages() or by kasan_krealloc() for krealloc().
 	 */
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index c643740b859969..76cf2b6229c799 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -192,28 +192,6 @@ void __init kasan_init_hw_tags(void)
 		kasan_stack_collection_enabled() ? "on" : "off");
 }
 
-void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
-{
-	/*
-	 * This condition should match the one in post_alloc_hook() in
-	 * page_alloc.c.
-	 */
-	bool init = !want_init_on_free() && want_init_on_alloc(flags);
-	bool init_tags = init && (flags & __GFP_ZEROTAGS);
-
-	if (flags & __GFP_SKIP_KASAN_POISON)
-		SetPageSkipKASanPoison(page);
-
-	if (init_tags) {
-		int i;
-
-		for (i = 0; i != 1 << order; ++i)
-			tag_clear_highpage(page + i);
-	} else {
-		kasan_unpoison_pages(page, order, init);
-	}
-}
-
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_enable_tagging_sync(void)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2d6e48d2d8365..db82facf14e75e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2346,6 +2346,9 @@ static inline bool check_new_pcp(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
+	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
@@ -2361,15 +2364,22 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	/*
 	 * As memory initialization might be integrated into KASAN,
-	 * kasan_alloc_pages and kernel_init_free_pages must be
+	 * KASAN unpoisoning and memory initialization code must be
 	 * kept together to avoid discrepancies in behavior.
 	 */
 	if (kasan_has_integrated_init()) {
-		kasan_alloc_pages(page, order, gfp_flags);
-	} else {
-		bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
-		bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+		if (gfp_flags & __GFP_SKIP_KASAN_POISON)
+			SetPageSkipKASanPoison(page);
+
+		if (init_tags) {
+			int i;
+
+			for (i = 0; i != 1 << order; ++i)
+				tag_clear_highpage(page + i);
+		} else {
+			kasan_unpoison_pages(page, order, init);
+		}
+	} else {
 		kasan_unpoison_pages(page, order, init);
 
 		if (init_tags) {