author		Andrew Morton <akpm@linux-foundation.org>	2024-04-04 14:09:08 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-04 14:09:08 -0700
commit		3cc903845731f4b389d53bfd4b54038cf1eec0cb (patch)
tree		e217e35901782465e57c40392352c829290f8e86
parent		46d7b2ff314dfa671abffe5c5e8faaab673e68ec (diff)
download	25-new-3cc903845731f4b389d53bfd4b54038cf1eec0cb.tar.gz
foo
40 files changed, 319 insertions, 320 deletions
diff --git a/patches/kasan-hw_tags-include-linux-vmalloch.patch b/patches/fix-missing-vmalloch-includes-fix-5.patch
index 00907d79f..a055e0381 100644
--- a/patches/kasan-hw_tags-include-linux-vmalloch.patch
+++ b/patches/fix-missing-vmalloch-includes-fix-5.patch
@@ -28,13 +28,15 @@ Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 Cc: Dmitry Vyukov <dvyukov@google.com>
 Cc: Mark Rutland <mark.rutland@arm.com>
 Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 ---
 
  mm/kasan/hw_tags.c |    1 +
  1 file changed, 1 insertion(+)
 
---- a/mm/kasan/hw_tags.c~kasan-hw_tags-include-linux-vmalloch
+--- a/mm/kasan/hw_tags.c~fix-missing-vmalloch-includes-fix-5
 +++ a/mm/kasan/hw_tags.c
 @@ -16,6 +16,7 @@
  #include <linux/static_key.h>
diff --git a/patches/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.patch b/patches/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.patch
new file mode 100644
index 000000000..80e50042b
--- /dev/null
+++ b/patches/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.patch
@@ -0,0 +1,39 @@
+From: Andrew Morton <akpm@linux-foundation.org>
+Subject: mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix
+Date: Thu Apr  4 01:27:47 PM PDT 2024
+
+simplify PageAnonExclusive(), per Matthew
+
+Link: https://lkml.kernel.org/r/Zg3u5Sh9EbbYPhaI@casper.infradead.org
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Huacai Chen <chenhuacai@kernel.org>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: WANG Xuerui <kernel@xen0n.name>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/page-flags.h |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/include/linux/page-flags.h~mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix
++++ a/include/linux/page-flags.h
+@@ -1096,11 +1096,10 @@ static __always_inline int PageAnonExclu
+ {
+ 	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ 	/*
+-	 * Allow the anon-exclusive check to work on hugetlb tail pages.
+-	 * Here hugetlb pages will always guarantee the anon-exclusiveness
+-	 * of the head page represents the tail pages.
++	 * HugeTLB stores this information on the head page; THP keeps it per
++	 * page
+ 	 */
+-	if (PageHuge(page) && !PageHead(page))
++	if (PageHuge(page))
+ 		page = compound_head(page);
+ 	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ }
+_
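The simplification above works because compound_head() is already a no-op for a head page, so the old "&& !PageHead(page)" test only filtered out a case that was harmless anyway. A tiny userspace model of that invariant (illustrative only; the kernel's real compound_head() decodes a tagged pointer in struct page):

    #include <assert.h>
    #include <stddef.h>

    /* Toy model: tail pages point at their head; head pages at nothing. */
    struct page {
            struct page *head;              /* NULL for a head page */
    };

    static struct page *compound_head(struct page *page)
    {
            return page->head ? page->head : page;
    }

    int main(void)
    {
            struct page head = { .head = NULL };
            struct page tail = { .head = &head };

            /* Old form mapped only tails; new form maps both, but a
             * head page maps to itself, so the result is identical. */
            assert(compound_head(&tail) == &head);
            assert(compound_head(&head) == &head);
            return 0;
    }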
diff --git a/patches/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.patch b/patches/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.patch
new file mode 100644
index 000000000..060f53c35
--- /dev/null
+++ b/patches/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.patch
@@ -0,0 +1,41 @@
+From: Frank van der Linden <fvdl@google.com>
+Subject: mm/cma: drop incorrect alignment check in cma_init_reserved_mem
+Date: Thu, 4 Apr 2024 16:25:14 +0000
+
+cma_init_reserved_mem uses IS_ALIGNED to check if the size represented by
+one bit in the cma allocation bitmask is aligned with
+CMA_MIN_ALIGNMENT_BYTES (pageblock size).
+
+However, this is too strict, as this will fail if order_per_bit >
+pageblock_order, which is a valid configuration.
+
+We could check IS_ALIGNED both ways, but since both numbers are powers of
+two, no check is needed at all.
+
+Link: https://lkml.kernel.org/r/20240404162515.527802-1-fvdl@google.com
+Fixes: de9e14eebf33 ("drivers: dma-contiguous: add initialization from device tree")
+Signed-off-by: Frank van der Linden <fvdl@google.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/cma.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/mm/cma.c~mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem
++++ a/mm/cma.c
+@@ -182,10 +182,6 @@ int __init cma_init_reserved_mem(phys_ad
+ 	if (!size || !memblock_is_region_reserved(base, size))
+ 		return -EINVAL;
+ 
+-	/* alignment should be aligned with order_per_bit */
+-	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
+-		return -EINVAL;
+-
+ 	/* ensure minimal alignment required by mm core */
+ 	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
+ 		return -EINVAL;
+_
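The "powers of two" argument can be checked exhaustively: for any two powers of two, one always divides the other, so an IS_ALIGNED() test in either direction can never fail. A standalone sketch (not part of the patch; IS_ALIGNED simplified from the kernel's definition):

    #include <assert.h>

    #define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            /* Check all pairs of powers of two up to 2^20. */
            for (unsigned long a = 1; a <= 1UL << 20; a <<= 1)
                    for (unsigned long b = 1; b <= 1UL << 20; b <<= 1)
                            assert(IS_ALIGNED(a, b) || IS_ALIGNED(b, a));
            return 0;
    }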
diff --git a/patches/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.patch b/patches/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.patch
new file mode 100644
index 000000000..11365b433
--- /dev/null
+++ b/patches/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.patch
@@ -0,0 +1,42 @@
+From: Frank van der Linden <fvdl@google.com>
+Subject: mm/hugetlb: pass correct order_per_bit to cma_declare_contiguous_nid
+Date: Thu, 4 Apr 2024 16:25:15 +0000
+
+The hugetlb_cma code passes 0 in the order_per_bit argument to
+cma_declare_contiguous_nid (the alignment, computed using the page order,
+is correctly passed in).
+
+This causes a bit in the cma allocation bitmap to always represent a 4k
+page, making the bitmaps potentially very large, and slower.
+
+So, correctly pass in the order instead.
+
+Link: https://lkml.kernel.org/r/20240404162515.527802-2-fvdl@google.com
+Fixes: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
+Signed-off-by: Frank van der Linden <fvdl@google.com>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/hugetlb.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/hugetlb.c~mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid
++++ a/mm/hugetlb.c
+@@ -7783,9 +7783,9 @@ void __init hugetlb_cma_reserve(int orde
+ 	 * huge page demotion.
+ 	 */
+ 	res = cma_declare_contiguous_nid(0, size, 0,
+-					PAGE_SIZE << HUGETLB_PAGE_ORDER,
+-					0, false, name,
+-					&hugetlb_cma[nid], nid);
++					PAGE_SIZE << HUGETLB_PAGE_ORDER,
++					HUGETLB_PAGE_ORDER, false, name,
++					&hugetlb_cma[nid], nid);
+ 	if (res) {
+ 		pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
+ 			res, nid);
+_
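To put numbers on "potentially very large": the CMA bitmap needs one bit per (1 << order_per_bit) base pages. A back-of-envelope sketch, assuming x86_64 (4 KiB base pages, HUGETLB_PAGE_ORDER == 9, i.e. 2 MiB hugepages) and a hypothetical 16 GiB hugetlb_cma= reservation:

    #include <stdio.h>

    int main(void)
    {
            unsigned long pages = (16UL << 30) >> 12;   /* 16 GiB / 4 KiB */

            /* order_per_bit == 0: one bit per 4 KiB base page */
            printf("order_per_bit=0: %lu KiB of bitmap\n", pages / 8 / 1024);
            /* order_per_bit == 9: one bit per 2 MiB hugepage */
            printf("order_per_bit=9: %lu bytes of bitmap\n", (pages >> 9) / 8);
            return 0;
    }

    /* Prints 512 KiB vs. 1024 bytes -- a 512x reduction in bitmap size. */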
diff --git a/patches/mmpage_owner-fix-refcount-imbalance.patch b/patches/mmpage_owner-fix-refcount-imbalance.patch
deleted file mode 100644
index 6a4d22bcf..000000000
--- a/patches/mmpage_owner-fix-refcount-imbalance.patch
+++ /dev/null
@@ -1,234 +0,0 @@
-From: Oscar Salvador <osalvador@suse.de>
-Subject: mm,page_owner: fix refcount imbalance
-Date: Tue, 26 Mar 2024 07:30:35 +0100
-
-Current code does not contemplate scenarios where an allocation and free
-operation on the same pages do not handle it in the same amount at once.
-
-To give an example, alloc_pages_exact(), where we will allocate a page of
-enough order to satisfy the size request, but we will free the remainder
-right away.
-
-In the above example, we will increment the stack_record refcount only
-once, but we will decrease it the same number of times as number of unused
-pages we have to free.  This will lead to a warning because of refcount
-imbalance.
-
-Fix this by recording the number of base pages in the refcount field.
-
-Link: https://lkml.kernel.org/r/20240326063036.6242-3-osalvador@suse.de
-Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")
-Signed-off-by: Oscar Salvador <osalvador@suse.de>
-Reported-by: syzbot+41bbfdb8d41003d12c0f@syzkaller.appspotmail.com
-Closes: https://lore.kernel.org/linux-mm/00000000000090e8ff0613eda0e5@google.com
-Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
-Tested-by: Alexandre Ghiti <alexghiti@rivosinc.com>
-Cc: Alexander Potapenko <glider@google.com>
-Cc: Andrey Konovalov <andreyknvl@gmail.com>
-Cc: Marco Elver <elver@google.com>
-Cc: Michal Hocko <mhocko@suse.com>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
-
- Documentation/mm/page_owner.rst |   73 +++++++++++++++---------------
- mm/page_owner.c                 |   34 ++++++++-----
- 2 files changed, 58 insertions(+), 49 deletions(-)
-
---- a/Documentation/mm/page_owner.rst~mmpage_owner-fix-refcount-imbalance
-+++ a/Documentation/mm/page_owner.rst
-@@ -24,10 +24,10 @@ fragmentation statistics can be obtained
- each page.  It is already implemented and activated if page owner is
- enabled.  Other usages are more than welcome.
-
--It can also be used to show all the stacks and their outstanding
--allocations, which gives us a quick overview of where the memory is going
--without the need to screen through all the pages and match the allocation
--and free operation.
-+It can also be used to show all the stacks and their current number of
-+allocated base pages, which gives us a quick overview of where the memory
-+is going without the need to screen through all the pages and match the
-+allocation and free operation.
-
- page owner is disabled by default.  So, if you'd like to use it, you need
- to add "page_owner=on" to your boot cmdline.  If the kernel is built
-@@ -75,42 +75,45 @@ Usage
-
-	cat /sys/kernel/debug/page_owner_stacks/show_stacks > stacks.txt
-	cat stacks.txt
--	 prep_new_page+0xa9/0x120
--	 get_page_from_freelist+0x7e6/0x2140
--	 __alloc_pages+0x18a/0x370
--	 new_slab+0xc8/0x580
--	 ___slab_alloc+0x1f2/0xaf0
--	 __slab_alloc.isra.86+0x22/0x40
--	 kmem_cache_alloc+0x31b/0x350
--	 __khugepaged_enter+0x39/0x100
--	 dup_mmap+0x1c7/0x5ce
--	 copy_process+0x1afe/0x1c90
--	 kernel_clone+0x9a/0x3c0
--	 __do_sys_clone+0x66/0x90
--	 do_syscall_64+0x7f/0x160
--	 entry_SYSCALL_64_after_hwframe+0x6c/0x74
--	stack_count: 234
-+	 post_alloc_hook+0x177/0x1a0
-+	 get_page_from_freelist+0xd01/0xd80
-+	 __alloc_pages+0x39e/0x7e0
-+	 allocate_slab+0xbc/0x3f0
-+	 ___slab_alloc+0x528/0x8a0
-+	 kmem_cache_alloc+0x224/0x3b0
-+	 sk_prot_alloc+0x58/0x1a0
-+	 sk_alloc+0x32/0x4f0
-+	 inet_create+0x427/0xb50
-+	 __sock_create+0x2e4/0x650
-+	 inet_ctl_sock_create+0x30/0x180
-+	 igmp_net_init+0xc1/0x130
-+	 ops_init+0x167/0x410
-+	 setup_net+0x304/0xa60
-+	 copy_net_ns+0x29b/0x4a0
-+	 create_new_namespaces+0x4a1/0x820
-+	nr_base_pages: 16
-	...
-	...
-
-	echo 7000 > /sys/kernel/debug/page_owner_stacks/count_threshold
-	cat /sys/kernel/debug/page_owner_stacks/show_stacks> stacks_7000.txt
-	cat stacks_7000.txt
--	 prep_new_page+0xa9/0x120
--	 get_page_from_freelist+0x7e6/0x2140
--	 __alloc_pages+0x18a/0x370
--	 alloc_pages_mpol+0xdf/0x1e0
--	 folio_alloc+0x14/0x50
--	 filemap_alloc_folio+0xb0/0x100
--	 page_cache_ra_unbounded+0x97/0x180
--	 filemap_fault+0x4b4/0x1200
--	 __do_fault+0x2d/0x110
--	 do_pte_missing+0x4b0/0xa30
--	 __handle_mm_fault+0x7fa/0xb70
--	 handle_mm_fault+0x125/0x300
--	 do_user_addr_fault+0x3c9/0x840
--	 exc_page_fault+0x68/0x150
--	 asm_exc_page_fault+0x22/0x30
--	stack_count: 8248
-+	 post_alloc_hook+0x177/0x1a0
-+	 get_page_from_freelist+0xd01/0xd80
-+	 __alloc_pages+0x39e/0x7e0
-+	 alloc_pages_mpol+0x22e/0x490
-+	 folio_alloc+0xd5/0x110
-+	 filemap_alloc_folio+0x78/0x230
-+	 page_cache_ra_order+0x287/0x6f0
-+	 filemap_get_pages+0x517/0x1160
-+	 filemap_read+0x304/0x9f0
-+	 xfs_file_buffered_read+0xe6/0x1d0 [xfs]
-+	 xfs_file_read_iter+0x1f0/0x380 [xfs]
-+	 __kernel_read+0x3b9/0x730
-+	 kernel_read_file+0x309/0x4d0
-+	 __do_sys_finit_module+0x381/0x730
-+	 do_syscall_64+0x8d/0x150
-+	 entry_SYSCALL_64_after_hwframe+0x62/0x6a
-+	nr_base_pages: 20824
-	...
-
-	cat /sys/kernel/debug/page_owner > page_owner_full.txt
---- a/mm/page_owner.c~mmpage_owner-fix-refcount-imbalance
-+++ a/mm/page_owner.c
-@@ -196,7 +196,8 @@ static void add_stack_record_to_list(str
- 	spin_unlock_irqrestore(&stack_list_lock, flags);
- }
-
--static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask)
-+static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
-+				   int nr_base_pages)
- {
- 	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);
-
-@@ -217,15 +218,20 @@ static void inc_stack_record_count(depot
- 		/* Add the new stack_record to our list */
- 		add_stack_record_to_list(stack_record, gfp_mask);
- 	}
--	refcount_inc(&stack_record->count);
-+	refcount_add(nr_base_pages, &stack_record->count);
- }
-
--static void dec_stack_record_count(depot_stack_handle_t handle)
-+static void dec_stack_record_count(depot_stack_handle_t handle,
-+				   int nr_base_pages)
- {
- 	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);
-
--	if (stack_record)
--		refcount_dec(&stack_record->count);
-+	if (!stack_record)
-+		return;
-+
-+	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
-+		pr_warn("%s: refcount went to 0 for %u handle\n", __func__,
-+			handle);
- }
-
- static inline void __update_page_owner_handle(struct page_ext *page_ext,
-@@ -306,7 +312,7 @@ void __reset_page_owner(struct page *pag
- 	 * the machinery is not ready yet, we cannot decrement
- 	 * their refcount either.
- 	 */
--	dec_stack_record_count(alloc_handle);
-+	dec_stack_record_count(alloc_handle, 1 << order);
- }
-
- noinline void __set_page_owner(struct page *page, unsigned short order,
-@@ -325,7 +331,7 @@ noinline void __set_page_owner(struct pa
- 			 current->pid, current->tgid, ts_nsec,
- 			 current->comm);
- 	page_ext_put(page_ext);
--	inc_stack_record_count(handle, gfp_mask);
-+	inc_stack_record_count(handle, gfp_mask, 1 << order);
- }
-
- void __set_page_owner_migrate_reason(struct page *page, int reason)
-@@ -872,11 +878,11 @@ static void *stack_next(struct seq_file
- 	return stack;
- }
-
--static unsigned long page_owner_stack_threshold;
-+static unsigned long page_owner_pages_threshold;
-
- static int stack_print(struct seq_file *m, void *v)
- {
--	int i, stack_count;
-+	int i, nr_base_pages;
- 	struct stack *stack = v;
- 	unsigned long *entries;
- 	unsigned long nr_entries;
-@@ -887,14 +893,14 @@ static int stack_print(struct seq_file *
-
- 	nr_entries = stack_record->size;
- 	entries = stack_record->entries;
--	stack_count = refcount_read(&stack_record->count) - 1;
-+	nr_base_pages = refcount_read(&stack_record->count) - 1;
-
--	if (stack_count < 1 || stack_count < page_owner_stack_threshold)
-+	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
- 		return 0;
-
- 	for (i = 0; i < nr_entries; i++)
- 		seq_printf(m, " %pS\n", (void *)entries[i]);
--	seq_printf(m, "stack_count: %d\n\n", stack_count);
-+	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);
-
- 	return 0;
- }
-@@ -924,13 +930,13 @@ static const struct file_operations page
-
- static int page_owner_threshold_get(void *data, u64 *val)
- {
--	*val = READ_ONCE(page_owner_stack_threshold);
-+	*val = READ_ONCE(page_owner_pages_threshold);
- 	return 0;
- }
-
- static int page_owner_threshold_set(void *data, u64 val)
- {
--	WRITE_ONCE(page_owner_stack_threshold, val);
-+	WRITE_ONCE(page_owner_pages_threshold, val);
- 	return 0;
- }
-
-_
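The imbalance is easy to reproduce on paper. A userspace model (illustrative only; numbers chosen for the alloc_pages_exact() example from the changelog, where an order-3 block is allocated and the 3 unused tail pages are freed immediately):

    #include <stdio.h>

    int main(void)
    {
            int order = 3, wanted = 5;      /* 8 pages allocated, 5 kept */
            int count = 1;                  /* stack_record baseline     */

            /* Before the fix: +1 per allocation, -1 per freed page. */
            count += 1;                     /* one order-3 allocation    */
            count -= (1 << order) - wanted; /* free 3 unused tail pages  */
            printf("old scheme: %d (below baseline -> refcount warning)\n", count);

            /* After the fix: the refcount tracks base pages instead. */
            count = 1;
            count += 1 << order;            /* refcount_add(8, ...)      */
            count -= (1 << order) - wanted; /* -3: tail pages freed      */
            count -= wanted;                /* -5: the rest, freed later */
            printf("new scheme: %d (back to baseline)\n", count);
            return 0;
    }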
diff --git a/patches/mmpage_owner-fix-accounting-of-pages-when-migrating.patch b/patches/old/mmpage_owner-fix-accounting-of-pages-when-migrating.patch
index dfa3ded70..dfa3ded70 100644
--- a/patches/mmpage_owner-fix-accounting-of-pages-when-migrating.patch
+++ b/patches/old/mmpage_owner-fix-accounting-of-pages-when-migrating.patch
diff --git a/patches/old/mmpage_owner-fix-refcount-imbalance.patch b/patches/old/mmpage_owner-fix-refcount-imbalance.patch
index 2acc07651..6a4d22bcf 100644
--- a/patches/old/mmpage_owner-fix-refcount-imbalance.patch
+++ b/patches/old/mmpage_owner-fix-refcount-imbalance.patch
@@ -1,6 +1,6 @@
 From: Oscar Salvador <osalvador@suse.de>
 Subject: mm,page_owner: fix refcount imbalance
-Date: Tue, 19 Mar 2024 19:32:11 +0100
+Date: Tue, 26 Mar 2024 07:30:35 +0100
 
 Current code does not contemplate scenarios where an allocation and free
 operation on the same pages do not handle it in the same amount at once.
@@ -16,24 +16,23 @@ imbalance.
 
 Fix this by recording the number of base pages in the refcount field.
 
-Link: https://lkml.kernel.org/r/20240319183212.17156-2-osalvador@suse.de
+Link: https://lkml.kernel.org/r/20240326063036.6242-3-osalvador@suse.de
 Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")
 Signed-off-by: Oscar Salvador <osalvador@suse.de>
 Reported-by: syzbot+41bbfdb8d41003d12c0f@syzkaller.appspotmail.com
 Closes: https://lore.kernel.org/linux-mm/00000000000090e8ff0613eda0e5@google.com
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Tested-by: Alexandre Ghiti <alexghiti@rivosinc.com>
 Cc: Alexander Potapenko <glider@google.com>
 Cc: Andrey Konovalov <andreyknvl@gmail.com>
 Cc: Marco Elver <elver@google.com>
 Cc: Michal Hocko <mhocko@suse.com>
-Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
-Cc: Vlastimil Babka <vbabka@suse.cz>
-Cc: Matthew Wilcox <willy@infradead.org>
 Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 ---
 
  Documentation/mm/page_owner.rst |   73 +++++++++++++++---------------
- mm/page_owner.c                 |   38 +++++++--------
- 2 files changed, 56 insertions(+), 55 deletions(-)
+ mm/page_owner.c                 |   34 ++++++++-----
+ 2 files changed, 58 insertions(+), 49 deletions(-)
 
 --- a/Documentation/mm/page_owner.rst~mmpage_owner-fix-refcount-imbalance
 +++ a/Documentation/mm/page_owner.rst
@@ -131,7 +130,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 
 	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 --- a/mm/page_owner.c~mmpage_owner-fix-refcount-imbalance
 +++ a/mm/page_owner.c
-@@ -196,9 +196,11 @@ static void add_stack_record_to_list(str
+@@ -196,7 +196,8 @@ static void add_stack_record_to_list(str
  	spin_unlock_irqrestore(&stack_list_lock, flags);
  }
@@ -140,24 +139,12 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 +				   int nr_base_pages)
  {
  	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);
-+	int old = REFCOUNT_SATURATED;
- 
- 	if (!stack_record)
- 		return;
-@@ -210,22 +212,18 @@ static void inc_stack_record_count(depot
- 	 * Since we do not use STACK_DEPOT_FLAG_GET API, let us
- 	 * set a refcount of 1 ourselves.
- 	 */
--	if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
--		int old = REFCOUNT_SATURATED;
--
--		if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
--			/* Add the new stack_record to our list */
--			add_stack_record_to_list(stack_record, gfp_mask);
--	}
- 	refcount_inc(&stack_record->count);
-+	if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
-+		add_stack_record_to_list(stack_record, gfp_mask);
+ 
+@@ -217,15 +218,20 @@ static void inc_stack_record_count(depot
+ 		/* Add the new stack_record to our list */
+ 		add_stack_record_to_list(stack_record, gfp_mask);
+ 	}
+-	refcount_inc(&stack_record->count);
++	refcount_add(nr_base_pages, &stack_record->count);
  }
 
 -static void dec_stack_record_count(depot_stack_handle_t handle)
 +static void dec_stack_record_count(depot_stack_handle_t handle,
 +				   int nr_base_pages)
  {
  	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);
 
- 	if (stack_record)
+-	if (stack_record)
 -		refcount_dec(&stack_record->count);
-+		refcount_sub_and_test(nr_base_pages, &stack_record->count);
++	if (!stack_record)
++		return;
++
++	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
++		pr_warn("%s: refcount went to 0 for %u handle\n", __func__,
++			handle);
  }
 
- void __reset_page_owner(struct page *page, unsigned short order)
-@@ -263,7 +261,7 @@ void __reset_page_owner(struct page *pag
+ static inline void __update_page_owner_handle(struct page_ext *page_ext,
+@@ -306,7 +312,7 @@ void __reset_page_owner(struct page *pag
  	 * the machinery is not ready yet, we cannot decrement
  	 * their refcount either.
  	 */
@@ -181,17 +173,17 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 +	dec_stack_record_count(alloc_handle, 1 << order);
  }
 
- static inline void __set_page_owner_handle(struct page_ext *page_ext,
-@@ -305,7 +303,7 @@ noinline void __set_page_owner(struct pa
- 		return;
- 	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
+ noinline void __set_page_owner(struct page *page, unsigned short order,
+@@ -325,7 +331,7 @@ noinline void __set_page_owner(struct pa
+ 			 current->pid, current->tgid, ts_nsec,
+ 			 current->comm);
  	page_ext_put(page_ext);
 -	inc_stack_record_count(handle, gfp_mask);
 +	inc_stack_record_count(handle, gfp_mask, 1 << order);
  }
 
  void __set_page_owner_migrate_reason(struct page *page, int reason)
-@@ -861,11 +859,11 @@ static void *stack_next(struct seq_file
+@@ -872,11 +878,11 @@ static void *stack_next(struct seq_file
  	return stack;
  }
 
 -static unsigned long page_owner_stack_threshold;
 +static unsigned long page_owner_pages_threshold;
 
  static int stack_print(struct seq_file *m, void *v)
  {
 -	int i, stack_count;
 +	int i, nr_base_pages;
  	struct stack *stack = v;
  	unsigned long *entries;
  	unsigned long nr_entries;
-@@ -876,14 +874,14 @@ static int stack_print(struct seq_file *
+@@ -887,14 +893,14 @@ static int stack_print(struct seq_file *
 
  	nr_entries = stack_record->size;
  	entries = stack_record->entries;
 -	stack_count = refcount_read(&stack_record->count) - 1;
 +	nr_base_pages = refcount_read(&stack_record->count) - 1;
 
 -	if (stack_count < 1 || stack_count < page_owner_stack_threshold)
 +	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
  		return 0;
 
  	for (i = 0; i < nr_entries; i++)
  		seq_printf(m, " %pS\n", (void *)entries[i]);
 -	seq_printf(m, "stack_count: %d\n\n", stack_count);
 +	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);
 
  	return 0;
  }
-@@ -913,13 +911,13 @@ static const struct file_operations page
+@@ -924,13 +930,13 @@ static const struct file_operations page
 
  static int page_owner_threshold_get(void *data, u64 *val)
  {
 -	*val = READ_ONCE(page_owner_stack_threshold);
 +	*val = READ_ONCE(page_owner_pages_threshold);
  	return 0;
  }
 
  static int page_owner_threshold_set(void *data, u64 val)
  {
 -	WRITE_ONCE(page_owner_stack_threshold, val);
 +	WRITE_ONCE(page_owner_pages_threshold, val);
  	return 0;
  }
 
 _
diff --git a/patches/mmpage_owner-update-metada-for-tail-pages.patch b/patches/old/mmpage_owner-update-metada-for-tail-pages.patch
index 1c072b9fe..1c072b9fe 100644
--- a/patches/mmpage_owner-update-metada-for-tail-pages.patch
+++ b/patches/old/mmpage_owner-update-metada-for-tail-pages.patch
diff --git a/patches/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.patch b/patches/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.patch
new file mode 100644
index 000000000..416711193
--- /dev/null
+++ b/patches/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.patch
@@ -0,0 +1,52 @@
+From: Lokesh Gidra <lokeshgidra@google.com>
+Subject: userfaultfd: change src_folio after ensuring it's unpinned in UFFDIO_MOVE
+Date: Thu, 4 Apr 2024 10:17:26 -0700
+
+Commit d7a08838ab74 ("mm: userfaultfd: fix unexpected change to src_folio
+when UFFDIO_MOVE fails") moved the src_folio->{mapping, index} changing to
+after clearing the page-table and ensuring that it's not pinned.  This
+avoids failure of swapout+migration and possibly memory corruption.
+
+However, the commit missed fixing it in the huge-page case.
+
+Link: https://lkml.kernel.org/r/20240404171726.2302435-1-lokeshgidra@google.com
+Fixes: adef440691ba ("userfaultfd: UFFDIO_MOVE uABI")
+Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Kalesh Singh <kaleshsingh@google.com>
+Cc: Lokesh Gidra <lokeshgidra@google.com>
+Cc: Nicolas Geoffray <ngeoffray@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Qi Zheng <zhengqi.arch@bytedance.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/huge_memory.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/huge_memory.c~userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move
++++ a/mm/huge_memory.c
+@@ -2259,9 +2259,6 @@ int move_pages_huge_pmd(struct mm_struct
+ 		goto unlock_ptls;
+ 	}
+ 
+-	folio_move_anon_rmap(src_folio, dst_vma);
+-	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+-
+ 	src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+ 	/* Folio got pinned from under us. Put it back and fail the move. */
+ 	if (folio_maybe_dma_pinned(src_folio)) {
+@@ -2270,6 +2267,9 @@ int move_pages_huge_pmd(struct mm_struct
+ 		goto unlock_ptls;
+ 	}
+ 
++	folio_move_anon_rmap(src_folio, dst_vma);
++	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
++
+ 	_dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
+ 	/* Follow mremap() behavior and treat the entry dirty after the move */
+ 	_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+_
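The huge-PMD fix mirrors the PTE-level ordering from commit d7a08838ab74: nothing about src_folio may change until the move can no longer fail. In outline (calls taken from the diff above; locking and error-path details elided):

    /* sketch, not the full move_pages_huge_pmd() */
    src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);

    /* Folio got pinned from under us. Put it back and fail the move. */
    if (folio_maybe_dma_pinned(src_folio)) {
            /* src_folio->{mapping,index} are still untouched here */
            err = -EBUSY;
            goto unlock_ptls;
    }

    /* Point of no return: only now retarget the folio to dst_vma. */
    folio_move_anon_rmap(src_folio, dst_vma);
    WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));

With the old order, a failed move left src_folio already pointing at dst_vma, breaking later swapout and migration.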
diff --git a/pc/devel-series b/pc/devel-series
index 13eff6166..8c811a172 100644
--- a/pc/devel-series
+++ b/pc/devel-series
@@ -69,6 +69,9 @@ x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.patch
 #
 stackdepot-rename-pool_index-to-pool_index_plus_1.patch
 #
+#userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.patch: acks?
+userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.patch
+#
 ### hfe
 #
 #ENDBRANCH mm-hotfixes-unstable
@@ -129,6 +132,7 @@ fix-missing-vmalloch-includes-fix.patch
 fix-missing-vmalloch-includes-fix-2.patch
 fix-missing-vmalloch-includes-fix-3.patch
 fix-missing-vmalloch-includes-fix-4.patch
+fix-missing-vmalloch-includes-fix-5.patch
 #asm-generic-ioh-kill-vmalloch-dependency.patch: https://lkml.kernel.org/r/202403290536.7f9zGl5Q-lkp@intel.com https://lkml.kernel.org/r/202404031246.aq5Yr5KO-lkp@intel.com
 asm-generic-ioh-kill-vmalloch-dependency.patch
 mm-slub-mark-slab_free_freelist_hook-__always_inline.patch
@@ -160,7 +164,7 @@ rust-add-a-rust-helper-for-krealloc.patch
 mm-slab-enable-slab-allocation-tagging-for-kmalloc-and-friends.patch
 mm-slab-enable-slab-allocation-tagging-for-kmalloc-and-friends-fix.patch
 mm-slab-enable-slab-allocation-tagging-for-kmalloc-and-friends-fix-2.patch
-#mempool-hook-up-to-memory-allocation-profiling.patch: https://lkml.kernel.org/r/202404010132.6v0zt6oa-lkp@intel.com
+#mempool-hook-up-to-memory-allocation-profiling.patch: https://lkml.kernel.org/r/202404041707.4Bl4ifTI-lkp@intel.com
 mempool-hook-up-to-memory-allocation-profiling.patch
 mempool-hook-up-to-memory-allocation-profiling-fix.patch
 mempool-hook-up-to-memory-allocation-profiling-fix-2.patch
@@ -306,10 +310,6 @@ mm-slab-move-memcg-charging-to-post-alloc-hook-fix.patch
 mm-slab-move-memcg-charging-to-post-alloc-hook-fix-2.patch
 mm-slab-move-slab_memcg-hooks-to-mm-memcontrolc.patch
 #
-#mmpage_owner-update-metada-for-tail-pages.patch+N: https://lkml.kernel.org/r/59860bb4-0dff-4575-b4cb-b88e6e1ccb77@huawei.com
-mmpage_owner-update-metada-for-tail-pages.patch
-mmpage_owner-fix-refcount-imbalance.patch
-mmpage_owner-fix-accounting-of-pages-when-migrating.patch
 #
 #mm-move-array-mem_section-init-code-out-of-memory_present.patch: https://lkml.kernel.org/r/Zgu_jjcLtEF-TlUj@kernel.org
 mm-move-array-mem_section-init-code-out-of-memory_present.patch
@@ -352,8 +352,8 @@ mm-gup-handle-huge-pmd-for-follow_pmd_mask-fix.patch
 mm-gup-handle-hugepd-for-follow_page.patch
 mm-gup-handle-hugetlb-in-the-generic-follow_page_mask-code.patch
 #
-#mm-allow-anon-exclusive-check-over-hugetlb-tail-pages.patch: https://lkml.kernel.org/r/Zg3u5Sh9EbbYPhaI@casper.infradead.org
 mm-allow-anon-exclusive-check-over-hugetlb-tail-pages.patch
+mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.patch
 #
 mm-use-rwsem-assertion-macros-for-mmap_lock.patch
 #
@@ -465,12 +465,14 @@ hugetlb-convert-hugetlb_fault-to-use-struct-vm_fault.patch
 hugetlb-convert-hugetlb_no_page-to-use-struct-vm_fault.patch
 hugetlb-convert-hugetlb_wp-to-use-struct-vm_fault.patch
 #
-#kasan-hw_tags-include-linux-vmalloch.patch: fixes what?
-kasan-hw_tags-include-linux-vmalloch.patch
-#
 selftests-break-the-dependency-upon-local-header-files.patch
 selftests-mm-fix-additional-build-errors-for-selftests.patch
 #
+#mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.patch+1: effects? -stable?
+mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.patch
+#mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.patch: https://lkml.kernel.org/r/e74cfee3-565f-4c69-bb7b-bdd40d01d212@redhat.com
+mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.patch
+#
 #
 #
 #
diff --git a/pc/kasan-hw_tags-include-linux-vmalloch.pc b/pc/fix-missing-vmalloch-includes-fix-5.pc
index 8cc173fa1..8cc173fa1 100644
--- a/pc/kasan-hw_tags-include-linux-vmalloch.pc
+++ b/pc/fix-missing-vmalloch-includes-fix-5.pc
diff --git a/pc/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.pc b/pc/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.pc
new file mode 100644
index 000000000..4c8d1ed04
--- /dev/null
+++ b/pc/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.pc
@@ -0,0 +1,2 @@
+include/linux/page-flags.h
+mm/internal.h
diff --git a/pc/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.pc b/pc/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.pc
new file mode 100644
index 000000000..f84b4d840
--- /dev/null
+++ b/pc/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.pc
@@ -0,0 +1 @@
+mm/cma.c
diff --git a/pc/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.pc b/pc/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.pc
new file mode 100644
index 000000000..6dc98425d
--- /dev/null
+++ b/pc/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.pc
@@ -0,0 +1 @@
+mm/hugetlb.c
diff --git a/pc/mmpage_owner-fix-accounting-of-pages-when-migrating.pc b/pc/mmpage_owner-fix-accounting-of-pages-when-migrating.pc
deleted file mode 100644
index 89fe6a5de..000000000
--- a/pc/mmpage_owner-fix-accounting-of-pages-when-migrating.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/page_owner.c
diff --git a/pc/mmpage_owner-fix-refcount-imbalance.pc b/pc/mmpage_owner-fix-refcount-imbalance.pc
deleted file mode 100644
index 5a9efe536..000000000
--- a/pc/mmpage_owner-fix-refcount-imbalance.pc
+++ /dev/null
@@ -1,2 +0,0 @@
-Documentation/mm/page_owner.rst
-mm/page_owner.c
diff --git a/pc/mmpage_owner-update-metada-for-tail-pages.pc b/pc/mmpage_owner-update-metada-for-tail-pages.pc
deleted file mode 100644
index 89fe6a5de..000000000
--- a/pc/mmpage_owner-update-metada-for-tail-pages.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/page_owner.c
diff --git a/pc/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.pc b/pc/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.pc
new file mode 100644
index 000000000..b35bccbe3
--- /dev/null
+++ b/pc/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.pc
@@ -0,0 +1 @@
+mm/huge_memory.c
diff --git a/txt/dax-use-huge_zero_folio.txt b/txt/dax-use-huge_zero_folio.txt
index fa295b039..c67971957 100644
--- a/txt/dax-use-huge_zero_folio.txt
+++ b/txt/dax-use-huge_zero_folio.txt
@@ -6,3 +6,4 @@ Convert from huge_zero_page to huge_zero_folio.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-8-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/filemap-remove-__set_page_dirty.txt b/txt/filemap-remove-__set_page_dirty.txt
index 36d686bde..ec2593a81 100644
--- a/txt/filemap-remove-__set_page_dirty.txt
+++ b/txt/filemap-remove-__set_page_dirty.txt
@@ -6,3 +6,4 @@ All callers have been converted to use folios; remove this wrapper.
 
 Link: https://lkml.kernel.org/r/20240327185447.1076689-1-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/kasan-hw_tags-include-linux-vmalloch.txt b/txt/fix-missing-vmalloch-includes-fix-5.txt
index 07b97ab84..18f912d66 100644
--- a/txt/kasan-hw_tags-include-linux-vmalloch.txt
+++ b/txt/fix-missing-vmalloch-includes-fix-5.txt
@@ -28,3 +28,5 @@ Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 Cc: Dmitry Vyukov <dvyukov@google.com>
 Cc: Mark Rutland <mark.rutland@arm.com>
 Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
diff --git a/txt/mm-add-is_huge_zero_folio.txt b/txt/mm-add-is_huge_zero_folio.txt
index 023f4ff14..29b3ad625 100644
--- a/txt/mm-add-is_huge_zero_folio.txt
+++ b/txt/mm-add-is_huge_zero_folio.txt
@@ -8,3 +8,4 @@ getting confused when the predicate returns false.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-3-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-add-pmd_folio.txt b/txt/mm-add-pmd_folio.txt
index 0536d47d8..066b709ec 100644
--- a/txt/mm-add-pmd_folio.txt
+++ b/txt/mm-add-pmd_folio.txt
@@ -8,3 +8,4 @@ write it, but it might end up being more efficient later.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-4-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.txt b/txt/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.txt
new file mode 100644
index 000000000..cd1501134
--- /dev/null
+++ b/txt/mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix.txt
@@ -0,0 +1,15 @@
+From: Andrew Morton <akpm@linux-foundation.org>
+Subject: mm-allow-anon-exclusive-check-over-hugetlb-tail-pages-fix
+Date: Thu Apr  4 01:27:47 PM PDT 2024
+
+simplify PageAnonExclusive(), per Matthew
+
+Link: https://lkml.kernel.org/r/Zg3u5Sh9EbbYPhaI@casper.infradead.org
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Huacai Chen <chenhuacai@kernel.org>
+Cc: Jason Gunthorpe <jgg@nvidia.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: WANG Xuerui <kernel@xen0n.name>
diff --git a/txt/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.txt b/txt/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.txt
new file mode 100644
index 000000000..66dfc2d73
--- /dev/null
+++ b/txt/mm-cma-drop-incorrect-alignment-check-in-cma_init_reserved_mem.txt
@@ -0,0 +1,21 @@
+From: Frank van der Linden <fvdl@google.com>
+Subject: mm/cma: drop incorrect alignment check in cma_init_reserved_mem
+Date: Thu, 4 Apr 2024 16:25:14 +0000
+
+cma_init_reserved_mem uses IS_ALIGNED to check if the size represented by
+one bit in the cma allocation bitmask is aligned with
+CMA_MIN_ALIGNMENT_BYTES (pageblock size).
+
+However, this is too strict, as this will fail if order_per_bit >
+pageblock_order, which is a valid configuration.
+
+We could check IS_ALIGNED both ways, but since both numbers are powers of
+two, no check is needed at all.
+
+Link: https://lkml.kernel.org/r/20240404162515.527802-1-fvdl@google.com
+Fixes: de9e14eebf33 ("drivers: dma-contiguous: add initialization from device tree")
+Signed-off-by: Frank van der Linden <fvdl@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
diff --git a/txt/mm-convert-do_huge_pmd_anonymous_page-to-huge_zero_folio.txt b/txt/mm-convert-do_huge_pmd_anonymous_page-to-huge_zero_folio.txt
index 39cf20741..aa40602a1 100644
--- a/txt/mm-convert-do_huge_pmd_anonymous_page-to-huge_zero_folio.txt
+++ b/txt/mm-convert-do_huge_pmd_anonymous_page-to-huge_zero_folio.txt
@@ -6,3 +6,4 @@ Use folios more widely.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-7-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-convert-huge_zero_page-to-huge_zero_folio.txt b/txt/mm-convert-huge_zero_page-to-huge_zero_folio.txt
index db58f14b9..102d23e71 100644
--- a/txt/mm-convert-huge_zero_page-to-huge_zero_folio.txt
+++ b/txt/mm-convert-huge_zero_page-to-huge_zero_folio.txt
@@ -7,3 +7,4 @@ huge_zero_page itself from being a compound page to a folio.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-6-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-convert-migrate_vma_collect_pmd-to-use-a-folio.txt b/txt/mm-convert-migrate_vma_collect_pmd-to-use-a-folio.txt
index 442bc4da7..8b4d89eea 100644
--- a/txt/mm-convert-migrate_vma_collect_pmd-to-use-a-folio.txt
+++ b/txt/mm-convert-migrate_vma_collect_pmd-to-use-a-folio.txt
@@ -7,3 +7,4 @@ compound_head() into one.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-5-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt b/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt
new file mode 100644
index 000000000..3491f5271
--- /dev/null
+++ b/txt/mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.txt
@@ -0,0 +1,20 @@
+From: Frank van der Linden <fvdl@google.com>
+Subject: mm/hugetlb: pass correct order_per_bit to cma_declare_contiguous_nid
+Date: Thu, 4 Apr 2024 16:25:15 +0000
+
+The hugetlb_cma code passes 0 in the order_per_bit argument to
+cma_declare_contiguous_nid (the alignment, computed using the page order,
+is correctly passed in).
+
+This causes a bit in the cma allocation bitmap to always represent a 4k
+page, making the bitmaps potentially very large, and slower.
+
+So, correctly pass in the order instead.
+
+Link: https://lkml.kernel.org/r/20240404162515.527802-2-fvdl@google.com
+Fixes: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
+Signed-off-by: Frank van der Linden <fvdl@google.com>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Muchun Song <muchun.song@linux.dev>
diff --git a/txt/mm-page-flags-make-__pagemovable-return-bool.txt b/txt/mm-page-flags-make-__pagemovable-return-bool.txt
index 33687323e..c32bb3db6 100644
--- a/txt/mm-page-flags-make-__pagemovable-return-bool.txt
+++ b/txt/mm-page-flags-make-__pagemovable-return-bool.txt
@@ -6,4 +6,4 @@ Make __PageMovable() return bool like __folio_test_movable().
 
 Link: https://lkml.kernel.org/r/20240321032256.82063-1-gehao@kylinos.cn
 Signed-off-by: Hao Ge <gehao@kylinos.cn>
-Cc: Hao Ge <gehao@kylinos.cn>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-page-flags-make-pagemappingflags-return-bool.txt b/txt/mm-page-flags-make-pagemappingflags-return-bool.txt
index dcc296fa6..484236f26 100644
--- a/txt/mm-page-flags-make-pagemappingflags-return-bool.txt
+++ b/txt/mm-page-flags-make-pagemappingflags-return-bool.txt
@@ -7,3 +7,4 @@ Make PageMappingFlags() return bool like folio_mapping_flags().
 Link: https://lkml.kernel.org/r/20240321030712.80618-1-gehao@kylinos.cn
 Signed-off-by: Hao Ge <gehao@kylinos.cn>
 Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.txt b/txt/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.txt
index c3ff28e00..49461f4ec 100644
--- a/txt/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.txt
+++ b/txt/mm-page_alloc-use-the-correct-thp-order-for-thp-pcp.txt
@@ -15,5 +15,5 @@ THP for PCP will fix this issue
 Link: https://lkml.kernel.org/r/a25c9e14cd03907d5978b60546a69e6aa3fc2a7d.1712151833.git.baolin.wang@linux.alibaba.com
 Fixes: 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists")
 Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
 Cc: Mel Gorman <mgorman@techsingularity.net>
-Cc: Vlastimil Babka <vbabka@suse.cz>
diff --git a/txt/mm-rename-mm_put_huge_zero_page-to-mm_put_huge_zero_folio.txt b/txt/mm-rename-mm_put_huge_zero_page-to-mm_put_huge_zero_folio.txt
index 19901c9f8..12424b775 100644
--- a/txt/mm-rename-mm_put_huge_zero_page-to-mm_put_huge_zero_folio.txt
+++ b/txt/mm-rename-mm_put_huge_zero_page-to-mm_put_huge_zero_folio.txt
@@ -6,3 +6,4 @@ Also remove mm_get_huge_zero_page() now it has no users.
 
 Link: https://lkml.kernel.org/r/20240326202833.523759-9-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/mmpage_owner-fix-refcount-imbalance.txt b/txt/mmpage_owner-fix-refcount-imbalance.txt
deleted file mode 100644
index 5f936cd8f..000000000
--- a/txt/mmpage_owner-fix-refcount-imbalance.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-From: Oscar Salvador <osalvador@suse.de>
-Subject: mm,page_owner: fix refcount imbalance
-Date: Tue, 26 Mar 2024 07:30:35 +0100
-
-Current code does not contemplate scenarios where an allocation and free
-operation on the same pages do not handle it in the same amount at once.
-
-To give an example, alloc_pages_exact(), where we will allocate a page of
-enough order to satisfy the size request, but we will free the remainder
-right away.
-
-In the above example, we will increment the stack_record refcount only
-once, but we will decrease it the same number of times as number of unused
-pages we have to free.  This will lead to a warning because of refcount
-imbalance.
-
-Fix this by recording the number of base pages in the refcount field.
-
-Link: https://lkml.kernel.org/r/20240326063036.6242-3-osalvador@suse.de
-Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")
-Signed-off-by: Oscar Salvador <osalvador@suse.de>
-Reported-by: syzbot+41bbfdb8d41003d12c0f@syzkaller.appspotmail.com
-Closes: https://lore.kernel.org/linux-mm/00000000000090e8ff0613eda0e5@google.com
-Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
-Tested-by: Alexandre Ghiti <alexghiti@rivosinc.com>
-Cc: Alexander Potapenko <glider@google.com>
-Cc: Andrey Konovalov <andreyknvl@gmail.com>
-Cc: Marco Elver <elver@google.com>
-Cc: Michal Hocko <mhocko@suse.com>
diff --git a/txt/mmpage_owner-fix-accounting-of-pages-when-migrating.txt b/txt/old/mmpage_owner-fix-accounting-of-pages-when-migrating.txt
index a6478d7aa..a6478d7aa 100644
--- a/txt/mmpage_owner-fix-accounting-of-pages-when-migrating.txt
+++ b/txt/old/mmpage_owner-fix-accounting-of-pages-when-migrating.txt
diff --git a/txt/old/mmpage_owner-fix-refcount-imbalance.txt b/txt/old/mmpage_owner-fix-refcount-imbalance.txt
index fa81c10f9..5f936cd8f 100644
--- a/txt/old/mmpage_owner-fix-refcount-imbalance.txt
+++ b/txt/old/mmpage_owner-fix-refcount-imbalance.txt
@@ -1,6 +1,6 @@
 From: Oscar Salvador <osalvador@suse.de>
 Subject: mm,page_owner: fix refcount imbalance
-Date: Tue, 19 Mar 2024 19:32:11 +0100
+Date: Tue, 26 Mar 2024 07:30:35 +0100
 
 Current code does not contemplate scenarios where an allocation and free
 operation on the same pages do not handle it in the same amount at once.
@@ -16,15 +16,14 @@ imbalance.
 
 Fix this by recording the number of base pages in the refcount field.
 
-Link: https://lkml.kernel.org/r/20240319183212.17156-2-osalvador@suse.de
+Link: https://lkml.kernel.org/r/20240326063036.6242-3-osalvador@suse.de
 Fixes: 217b2119b9e2 ("mm,page_owner: implement the tracking of the stacks count")
 Signed-off-by: Oscar Salvador <osalvador@suse.de>
 Reported-by: syzbot+41bbfdb8d41003d12c0f@syzkaller.appspotmail.com
 Closes: https://lore.kernel.org/linux-mm/00000000000090e8ff0613eda0e5@google.com
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Tested-by: Alexandre Ghiti <alexghiti@rivosinc.com>
 Cc: Alexander Potapenko <glider@google.com>
 Cc: Andrey Konovalov <andreyknvl@gmail.com>
 Cc: Marco Elver <elver@google.com>
 Cc: Michal Hocko <mhocko@suse.com>
-Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
-Cc: Vlastimil Babka <vbabka@suse.cz>
-Cc: Matthew Wilcox <willy@infradead.org>
diff --git a/txt/mmpage_owner-update-metada-for-tail-pages.txt b/txt/old/mmpage_owner-update-metada-for-tail-pages.txt
index 13a62f3a9..13a62f3a9 100644
--- a/txt/mmpage_owner-update-metada-for-tail-pages.txt
+++ b/txt/old/mmpage_owner-update-metada-for-tail-pages.txt
diff --git a/txt/sparc-use-is_huge_zero_pmd.txt b/txt/sparc-use-is_huge_zero_pmd.txt
index a5fa218b2..1a089d705 100644
--- a/txt/sparc-use-is_huge_zero_pmd.txt
+++ b/txt/sparc-use-is_huge_zero_pmd.txt
@@ -18,3 +18,4 @@ the pmd whether it is a huge zero page or not.  Saves 60 bytes of text.
 Link: https://lkml.kernel.org/r/20240326202833.523759-1-willy@infradead.org
 Link: https://lkml.kernel.org/r/20240326202833.523759-2-willy@infradead.org
 Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Acked-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.txt b/txt/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.txt
new file mode 100644
index 000000000..1991982d8
--- /dev/null
+++ b/txt/userfaultfd-change-src_folio-after-ensuring-its-unpinned-in-uffdio_move.txt
@@ -0,0 +1,23 @@
+From: Lokesh Gidra <lokeshgidra@google.com>
+Subject: userfaultfd: change src_folio after ensuring it's unpinned in UFFDIO_MOVE
+Date: Thu, 4 Apr 2024 10:17:26 -0700
+
+Commit d7a08838ab74 ("mm: userfaultfd: fix unexpected change to src_folio
+when UFFDIO_MOVE fails") moved the src_folio->{mapping, index} changing to
+after clearing the page-table and ensuring that it's not pinned.  This
+avoids failure of swapout+migration and possibly memory corruption.
+
+However, the commit missed fixing it in the huge-page case.
+
+Link: https://lkml.kernel.org/r/20240404171726.2302435-1-lokeshgidra@google.com
+Fixes: adef440691ba ("userfaultfd: UFFDIO_MOVE uABI")
+Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Kalesh Singh <kaleshsingh@google.com>
+Cc: Lokesh Gidra <lokeshgidra@google.com>
+Cc: Nicolas Geoffray <ngeoffray@google.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Qi Zheng <zhengqi.arch@bytedance.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
diff --git a/txt/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt b/txt/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt
index 3808f62f0..73816f472 100644
--- a/txt/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt
+++ b/txt/x86-mm-pat-fix-vm_pat-handling-in-cow-mappings.txt
@@ -117,3 +117,4 @@ Cc: Peter Zijlstra <peterz@infradead.org>
 Cc: Thomas Gleixner <tglx@linutronix.de>
 Cc: Borislav Petkov <bp@alien8.de>
 Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>