summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrew Morton <akpm@linux-foundation.org>2024-04-24 15:03:02 -0700
committerAndrew Morton <akpm@linux-foundation.org>2024-04-24 15:03:02 -0700
commitfc19818c729ff3b5cbf3145cb76a262090137b98 (patch)
treee80c5056e79bcbcf505c38198db38b4389a1ec46
parent62071c2027900f6a019bbcc079e4590c2f659418 (diff)
download25-new-fc19818c729ff3b5cbf3145cb76a262090137b98.tar.gz
foo
-rw-r--r--patches/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.patch35
-rw-r--r--patches/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.patch55
-rw-r--r--patches/gup-use-folios-for-gup_devmap.patch62
-rw-r--r--patches/kfifo-dont-use-proxy-headers.patch83
-rw-r--r--patches/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.patch78
-rw-r--r--patches/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.patch42
-rw-r--r--patches/media-rc-add-missing-ioh.patch82
-rw-r--r--patches/media-stih-cec-add-missing-ioh.patch39
-rw-r--r--patches/memcg-fix-data-race-kcsan-bug-in-rstats.patch97
-rw-r--r--patches/memory-failure-remove-calls-to-page_mapping.patch47
-rw-r--r--patches/migrate-expand-the-use-of-folio-in-__migrate_device_pages.patch60
-rw-r--r--patches/mm-add-kernel-doc-for-folio_mark_accessed.patch44
-rw-r--r--patches/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.patch49
-rw-r--r--patches/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.patch120
-rw-r--r--patches/mm-free-non-hugetlb-large-folios-in-a-batch-fix.patch94
-rw-r--r--patches/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.patch217
-rw-r--r--patches/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.patch49
-rw-r--r--patches/mm-page_owner-fixing-wrong-information-in-dump_page_owner.patch43
-rw-r--r--patches/mm-remove-page_cache_alloc.patch37
-rw-r--r--patches/mm-remove-page_mapping.patch44
-rw-r--r--patches/mm-remove-page_ref_sub_return.patch42
-rw-r--r--patches/mm-remove-pagereferenced.patch31
-rw-r--r--patches/mm-remove-put_devmap_managed_page.patch39
-rw-r--r--patches/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.patch85
-rw-r--r--patches/mseal-add-mseal-syscall-fix.patch129
-rw-r--r--patches/nilfs2-convert-to-use-the-new-mount-api.patch615
-rw-r--r--patches/ocfs2-remove-redundant-assignment-to-variable-status.patch40
-rw-r--r--patches/old/mm-update-shuffle-documentation-to-match-its-current-state.patch (renamed from patches/mm-update-shuffle-documentation-to-match-its-current-state.patch)0
-rw-r--r--patches/old/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch (renamed from patches/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch)0
-rw-r--r--patches/old/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch (renamed from patches/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch)0
-rw-r--r--patches/old/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch (renamed from patches/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch)0
-rw-r--r--patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch (renamed from patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch)0
-rw-r--r--patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch (renamed from patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch)0
-rw-r--r--patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch (renamed from patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch)0
-rw-r--r--patches/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.patch28
-rw-r--r--patches/tools-fix-userspace-compilation-with-new-test_xarray-changes.patch51
-rw-r--r--patches/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.patch63
-rw-r--r--patches/userfault-expand-folio-use-in-mfill_atomic_install_pte.patch41
-rw-r--r--pc/devel-series53
-rw-r--r--pc/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.pc1
-rw-r--r--pc/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.pc1
-rw-r--r--pc/gup-use-folios-for-gup_devmap.pc1
-rw-r--r--pc/kfifo-dont-use-proxy-headers.pc3
-rw-r--r--pc/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.pc1
-rw-r--r--pc/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.pc1
-rw-r--r--pc/media-rc-add-missing-ioh.pc4
-rw-r--r--pc/media-stih-cec-add-missing-ioh.pc1
-rw-r--r--pc/memcg-fix-data-race-kcsan-bug-in-rstats.pc1
-rw-r--r--pc/memory-failure-remove-calls-to-page_mapping.pc1
-rw-r--r--pc/migrate-expand-the-use-of-folio-in-__migrate_device_pages.pc1
-rw-r--r--pc/mm-add-kernel-doc-for-folio_mark_accessed.pc1
-rw-r--r--pc/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.pc1
-rw-r--r--pc/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.pc4
-rw-r--r--pc/mm-free-non-hugetlb-large-folios-in-a-batch-fix.pc1
-rw-r--r--pc/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.pc2
-rw-r--r--pc/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.pc1
-rw-r--r--pc/mm-page_owner-fixing-wrong-information-in-dump_page_owner.pc1
-rw-r--r--pc/mm-remove-page_cache_alloc.pc1
-rw-r--r--pc/mm-remove-page_mapping.pc2
-rw-r--r--pc/mm-remove-page_ref_sub_return.pc1
-rw-r--r--pc/mm-remove-pagereferenced.pc1
-rw-r--r--pc/mm-remove-put_devmap_managed_page.pc1
-rw-r--r--pc/mm-update-shuffle-documentation-to-match-its-current-state.pc2
-rw-r--r--pc/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.pc1
-rw-r--r--pc/mseal-add-mseal-syscall-fix.pc5
-rw-r--r--pc/nilfs2-convert-to-use-the-new-mount-api.pc4
-rw-r--r--pc/ocfs2-remove-redundant-assignment-to-variable-status.pc1
-rw-r--r--pc/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.pc1
-rw-r--r--pc/tools-fix-userspace-compilation-with-new-test_xarray-changes.pc1
-rw-r--r--pc/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.pc2
-rw-r--r--pc/userfault-expand-folio-use-in-mfill_atomic_install_pte.pc1
-rw-r--r--pc/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.pc1
-rw-r--r--pc/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.pc1
-rw-r--r--pc/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.pc1
-rw-r--r--pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.pc1
-rw-r--r--pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.pc1
-rw-r--r--pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.pc3
-rw-r--r--txt/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.txt11
-rw-r--r--txt/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.txt21
-rw-r--r--txt/gup-use-folios-for-gup_devmap.txt11
-rw-r--r--txt/kfifo-dont-use-proxy-headers.txt21
-rw-r--r--txt/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.txt44
-rw-r--r--txt/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.txt16
-rw-r--r--txt/media-rc-add-missing-ioh.txt32
-rw-r--r--txt/media-stih-cec-add-missing-ioh.txt22
-rw-r--r--txt/memcg-fix-data-race-kcsan-bug-in-rstats.txt47
-rw-r--r--txt/memory-failure-remove-calls-to-page_mapping.txt11
-rw-r--r--txt/migrate-expand-the-use-of-folio-in-__migrate_device_pages.txt11
-rw-r--r--txt/mm-add-kernel-doc-for-folio_mark_accessed.txt9
-rw-r--r--txt/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.txt26
-rw-r--r--txt/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.txt9
-rw-r--r--txt/mm-free-non-hugetlb-large-folios-in-a-batch-fix.txt77
-rw-r--r--txt/mm-hugetlb-fix-debug_locks_warn_on1-when-dissolve_free_hugetlb_folio.txt2
-rw-r--r--txt/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.txt144
-rw-r--r--txt/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.txt26
-rw-r--r--txt/mm-page_owner-fixing-wrong-information-in-dump_page_owner.txt25
-rw-r--r--txt/mm-remove-page_cache_alloc.txt16
-rw-r--r--txt/mm-remove-page_mapping.txt11
-rw-r--r--txt/mm-remove-page_ref_sub_return.txt9
-rw-r--r--txt/mm-remove-pagereferenced.txt9
-rw-r--r--txt/mm-remove-put_devmap_managed_page.txt9
-rw-r--r--txt/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.txt45
-rw-r--r--txt/mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.txt9
-rw-r--r--txt/mseal-add-mseal-syscall-fix.txt27
-rw-r--r--txt/nilfs2-convert-to-use-the-new-mount-api.txt11
-rw-r--r--txt/ocfs2-remove-redundant-assignment-to-variable-status.txt23
-rw-r--r--txt/old/mm-update-shuffle-documentation-to-match-its-current-state.txt (renamed from txt/mm-update-shuffle-documentation-to-match-its-current-state.txt)0
-rw-r--r--txt/old/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.txt (renamed from txt/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.txt)0
-rw-r--r--txt/old/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.txt (renamed from txt/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.txt)0
-rw-r--r--txt/old/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.txt (renamed from txt/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.txt)0
-rw-r--r--txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.txt (renamed from txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.txt)0
-rw-r--r--txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.txt (renamed from txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.txt)0
-rw-r--r--txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.txt (renamed from txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.txt)0
-rw-r--r--txt/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.txt12
-rw-r--r--txt/tools-fix-userspace-compilation-with-new-test_xarray-changes.txt33
-rw-r--r--txt/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.txt26
-rw-r--r--txt/userfault-expand-folio-use-in-mfill_atomic_install_pte.txt12
117 files changed, 3447 insertions, 23 deletions
diff --git a/patches/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.patch b/patches/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.patch
new file mode 100644
index 000000000..19b0d665c
--- /dev/null
+++ b/patches/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.patch
@@ -0,0 +1,35 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: f2fs: convert f2fs_clear_page_cache_dirty_tag to use a folio
+Date: Tue, 23 Apr 2024 23:55:33 +0100
+
+Removes uses of page_mapping() and page_index().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-3-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ fs/f2fs/data.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/f2fs/data.c~f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio
++++ a/fs/f2fs/data.c
+@@ -4082,11 +4082,12 @@ const struct address_space_operations f2
+
+ void f2fs_clear_page_cache_dirty_tag(struct page *page)
+ {
+- struct address_space *mapping = page_mapping(page);
++ struct folio *folio = page_folio(page);
++ struct address_space *mapping = folio->mapping;
+ unsigned long flags;
+
+ xa_lock_irqsave(&mapping->i_pages, flags);
+- __xa_clear_mark(&mapping->i_pages, page_index(page),
++ __xa_clear_mark(&mapping->i_pages, folio->index,
+ PAGECACHE_TAG_DIRTY);
+ xa_unlock_irqrestore(&mapping->i_pages, flags);
+ }
+_
diff --git a/patches/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.patch b/patches/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.patch
new file mode 100644
index 000000000..48ebc7271
--- /dev/null
+++ b/patches/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.patch
@@ -0,0 +1,55 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: fscrypt: convert bh_get_inode_and_lblk_num to use a folio
+Date: Tue, 23 Apr 2024 23:55:32 +0100
+
+Patch series "Remove page_mapping()".
+
+There are only a few users left. Convert them all to either call
+folio_mapping() or just use folio->mapping directly.
+
+
+This patch (of 6):
+
+Remove uses of page->index, page_mapping() and b_page. Saves a call
+to compound_head().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-1-willy@infradead.org
+Link: https://lkml.kernel.org/r/20240423225552.4113447-2-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ fs/crypto/inline_crypt.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/crypto/inline_crypt.c~fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio
++++ a/fs/crypto/inline_crypt.c
+@@ -284,7 +284,7 @@ static bool bh_get_inode_and_lblk_num(co
+ const struct inode **inode_ret,
+ u64 *lblk_num_ret)
+ {
+- struct page *page = bh->b_page;
++ struct folio *folio = bh->b_folio;
+ const struct address_space *mapping;
+ const struct inode *inode;
+
+@@ -292,13 +292,13 @@ static bool bh_get_inode_and_lblk_num(co
+ * The ext4 journal (jbd2) can submit a buffer_head it directly created
+ * for a non-pagecache page. fscrypt doesn't care about these.
+ */
+- mapping = page_mapping(page);
++ mapping = folio_mapping(folio);
+ if (!mapping)
+ return false;
+ inode = mapping->host;
+
+ *inode_ret = inode;
+- *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
++ *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
+ (bh_offset(bh) >> inode->i_blkbits);
+ return true;
+ }
+_
diff --git a/patches/gup-use-folios-for-gup_devmap.patch b/patches/gup-use-folios-for-gup_devmap.patch
new file mode 100644
index 000000000..5eeae5802
--- /dev/null
+++ b/patches/gup-use-folios-for-gup_devmap.patch
@@ -0,0 +1,62 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: gup: use folios for gup_devmap
+Date: Wed, 24 Apr 2024 20:19:10 +0100
+
+Use try_grab_folio() instead of try_grab_page() so we get the folio back
+that we calculated, and then use folio_set_referenced() instead of
+SetPageReferenced(). Correspondingly, use gup_put_folio() to put any
+unneeded references.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-6-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/gup.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/mm/gup.c~gup-use-folios-for-gup_devmap
++++ a/mm/gup.c
+@@ -2877,13 +2877,10 @@ static void __maybe_unused gup_fast_undo
+ unsigned int flags, struct page **pages)
+ {
+ while ((*nr) - nr_start) {
+- struct page *page = pages[--(*nr)];
++ struct folio *folio = page_folio(pages[--(*nr)]);
+
+- ClearPageReferenced(page);
+- if (flags & FOLL_PIN)
+- unpin_user_page(page);
+- else
+- put_page(page);
++ folio_clear_referenced(folio);
++ gup_put_folio(folio, 1, flags);
+ }
+ }
+
+@@ -3024,6 +3021,7 @@ static int gup_fast_devmap_leaf(unsigned
+ struct dev_pagemap *pgmap = NULL;
+
+ do {
++ struct folio *folio;
+ struct page *page = pfn_to_page(pfn);
+
+ pgmap = get_dev_pagemap(pfn, pgmap);
+@@ -3037,12 +3035,13 @@ static int gup_fast_devmap_leaf(unsigned
+ break;
+ }
+
+- SetPageReferenced(page);
+- pages[*nr] = page;
+- if (unlikely(try_grab_page(page, flags))) {
++ folio = try_grab_folio(page, 1, flags);
++ if (!folio) {
+ gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
+ break;
+ }
++ folio_set_referenced(folio);
++ pages[*nr] = page;
+ (*nr)++;
+ pfn++;
+ } while (addr += PAGE_SIZE, addr != end);
+_
diff --git a/patches/kfifo-dont-use-proxy-headers.patch b/patches/kfifo-dont-use-proxy-headers.patch
new file mode 100644
index 000000000..c8cf1fc36
--- /dev/null
+++ b/patches/kfifo-dont-use-proxy-headers.patch
@@ -0,0 +1,83 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Subject: kfifo: don't use "proxy" headers
+Date: Tue, 23 Apr 2024 22:23:10 +0300
+
+Update header inclusions to follow IWYU (Include What You Use) principle.
+
+Link: https://lkml.kernel.org/r/20240423192529.3249134-4-andriy.shevchenko@linux.intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Alain Volmat <alain.volmat@foss.st.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Chen-Yu Tsai <wens@csie.org>
+Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Jernej Skrabec <jernej.skrabec@gmail.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Patrice Chotard <patrice.chotard@foss.st.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Sean Young <sean@mess.org>
+Cc: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/kfifo.h | 9 +++++++--
+ lib/kfifo.c | 8 ++++----
+ samples/kfifo/dma-example.c | 3 ++-
+ 3 files changed, 13 insertions(+), 7 deletions(-)
+
+--- a/include/linux/kfifo.h~kfifo-dont-use-proxy-headers
++++ a/include/linux/kfifo.h
+@@ -36,10 +36,15 @@
+ * to lock the reader.
+ */
+
+-#include <linux/kernel.h>
++#include <linux/array_size.h>
+ #include <linux/spinlock.h>
+ #include <linux/stddef.h>
+-#include <linux/scatterlist.h>
++#include <linux/types.h>
++
++#include <asm/barrier.h>
++#include <asm/errno.h>
++
++struct scatterlist;
+
+ struct __kfifo {
+ unsigned int in;
+--- a/lib/kfifo.c~kfifo-dont-use-proxy-headers
++++ a/lib/kfifo.c
+@@ -5,13 +5,13 @@
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
+ */
+
+-#include <linux/kernel.h>
+-#include <linux/export.h>
+-#include <linux/slab.h>
+ #include <linux/err.h>
++#include <linux/export.h>
++#include <linux/kfifo.h>
+ #include <linux/log2.h>
++#include <linux/scatterlist.h>
++#include <linux/slab.h>
+ #include <linux/uaccess.h>
+-#include <linux/kfifo.h>
+
+ /*
+ * internal helper to calculate the unused elements in a fifo
+--- a/samples/kfifo/dma-example.c~kfifo-dont-use-proxy-headers
++++ a/samples/kfifo/dma-example.c
+@@ -6,8 +6,9 @@
+ */
+
+ #include <linux/init.h>
+-#include <linux/module.h>
+ #include <linux/kfifo.h>
++#include <linux/module.h>
++#include <linux/scatterlist.h>
+
+ /*
+ * This module shows how to handle fifo dma operations.
+_
diff --git a/patches/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.patch b/patches/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.patch
new file mode 100644
index 000000000..08c22255f
--- /dev/null
+++ b/patches/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.patch
@@ -0,0 +1,78 @@
+From: Luis Chamberlain <mcgrof@kernel.org>
+Subject: lib/test_xarray.c: fix error assumptions on check_xa_multi_store_adv_add()
+Date: Tue, 23 Apr 2024 12:22:21 -0700
+
+While testing lib/test_xarray in userspace I've noticed we can fail with:
+
+make -C tools/testing/radix-tree
+./tools/testing/radix-tree/xarray
+
+BUG at check_xa_multi_store_adv_add:749
+xarray: 0x55905fb21a00x head 0x55905fa1d8e0x flags 0 marks 0 0 0
+0: 0x55905fa1d8e0x
+xarray: ../../../lib/test_xarray.c:749: check_xa_multi_store_adv_add: Assertion `0' failed.
+Aborted
+
+We get a failure with a BUG_ON(), and that is because we actually can
+fail due to -ENOMEM, the check in xas_nomem() will fix this for us so
+it makes no sense to expect no failure inside the loop. So modify the
+check and since this is also useful for instructional purposes clarify
+the situation.
+
+The check for XA_BUG_ON(xa, xa_load(xa, index) != p) is already done
+at the end of the loop so just remove the bogus on inside the loop.
+
+With this we now pass the test in both kernel and userspace:
+
+In userspace:
+
+./tools/testing/radix-tree/xarray
+XArray: 149092856 of 149092856 tests passed
+
+In kernel space:
+
+XArray: 148257077 of 148257077 tests passed
+
+Link: https://lkml.kernel.org/r/20240423192221.301095-3-mcgrof@kernel.org
+Fixes: a60cc288a1a2 ("test_xarray: add tests for advanced multi-index use")
+Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Daniel Gomez <da.gomez@samsung.com>
+Cc: Darrick J. Wong <djwong@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Pankaj Raghav <p.raghav@samsung.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ lib/test_xarray.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/lib/test_xarray.c~lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add
++++ a/lib/test_xarray.c
+@@ -744,15 +744,20 @@ static noinline void check_xa_multi_stor
+
+ do {
+ xas_lock_irq(&xas);
+-
+ xas_store(&xas, p);
+- XA_BUG_ON(xa, xas_error(&xas));
+- XA_BUG_ON(xa, xa_load(xa, index) != p);
+-
+ xas_unlock_irq(&xas);
++ /*
++ * In our selftest case the only failure we can expect is for
++ * there not to be enough memory as we're not mimicking the
++ * entire page cache, so verify that's the only error we can run
++ * into here. The xas_nomem() which follows will ensure to fix
++ * that condition for us so to chug on on the loop.
++ */
++ XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
++ XA_BUG_ON(xa, xa_load(xa, index) != p);
+ }
+
+ /* mimics page_cache_delete() */
+_
diff --git a/patches/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.patch b/patches/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.patch
new file mode 100644
index 000000000..b0969b835
--- /dev/null
+++ b/patches/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.patch
@@ -0,0 +1,42 @@
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Subject: MAINTAINERS: update URL's for KEYS/KEYRINGS_INTEGRITY and TPM DEVICE DRIVER
+Date: Wed, 24 Apr 2024 00:45:49 +0300
+
+Add TPM driver test suite URL to the MAINTAINERS files and move the wiki
+URL to more appropriate location.
+
+Link: https://gitlab.com/jarkkojs/linux-tpmdd-test
+Link: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
+Link: https://lkml.kernel.org/r/20240423214549.8242-1-jarkko@kernel.org
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Acked-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Mimi Zohar <zohar@linux.ibm.com>
+Cc: Peter Huewe <peterhuewe@gmx.de>
+Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ MAINTAINERS | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/MAINTAINERS~maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver
++++ a/MAINTAINERS
+@@ -12042,6 +12042,7 @@ M: Mimi Zohar <zohar@linux.ibm.com>
+ L: linux-integrity@vger.kernel.org
+ L: keyrings@vger.kernel.org
+ S: Supported
++W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
+ F: security/integrity/platform_certs
+
+ KFENCE
+@@ -22420,7 +22421,7 @@ M: Jarkko Sakkinen <jarkko@kernel.org>
+ R: Jason Gunthorpe <jgg@ziepe.ca>
+ L: linux-integrity@vger.kernel.org
+ S: Maintained
+-W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
++W: https://gitlab.com/jarkkojs/linux-tpmdd-test
+ Q: https://patchwork.kernel.org/project/linux-integrity/list/
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
+ F: Documentation/devicetree/bindings/tpm/
+_
diff --git a/patches/media-rc-add-missing-ioh.patch b/patches/media-rc-add-missing-ioh.patch
new file mode 100644
index 000000000..89647d04f
--- /dev/null
+++ b/patches/media-rc-add-missing-ioh.patch
@@ -0,0 +1,82 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Subject: media: rc: add missing io.h
+Date: Tue, 23 Apr 2024 22:23:08 +0300
+
+Patch series "kfifo: Clean up kfifo.h", v2.
+
+To reduce dependency hell a degree, clean up kfifo.h (mainly getting rid
+of kernel.h in the global header).
+
+
+This patch (of 3):
+
+In many remote control drivers the io.h is implied by others. This is not
+good as it prevents from cleanups done in other headers. Add missing
+include.
+
+Link: https://lkml.kernel.org/r/20240423192529.3249134-1-andriy.shevchenko@linux.intel.com
+Link: https://lkml.kernel.org/r/20240423192529.3249134-2-andriy.shevchenko@linux.intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Alain Volmat <alain.volmat@foss.st.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Chen-Yu Tsai <wens@csie.org>
+Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Jernej Skrabec <jernej.skrabec@gmail.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Patrice Chotard <patrice.chotard@foss.st.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Sean Young <sean@mess.org>
+Cc: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ drivers/media/rc/mtk-cir.c | 1 +
+ drivers/media/rc/serial_ir.c | 1 +
+ drivers/media/rc/st_rc.c | 1 +
+ drivers/media/rc/sunxi-cir.c | 1 +
+ 4 files changed, 4 insertions(+)
+
+--- a/drivers/media/rc/mtk-cir.c~media-rc-add-missing-ioh
++++ a/drivers/media/rc/mtk-cir.c
+@@ -8,6 +8,7 @@
+ #include <linux/clk.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/io.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/reset.h>
+--- a/drivers/media/rc/serial_ir.c~media-rc-add-missing-ioh
++++ a/drivers/media/rc/serial_ir.c
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/errno.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/serial_reg.h>
+ #include <linux/types.h>
+--- a/drivers/media/rc/st_rc.c~media-rc-add-missing-ioh
++++ a/drivers/media/rc/st_rc.c
+@@ -6,6 +6,7 @@
+ #include <linux/kernel.h>
+ #include <linux/clk.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+--- a/drivers/media/rc/sunxi-cir.c~media-rc-add-missing-ioh
++++ a/drivers/media/rc/sunxi-cir.c
+@@ -12,6 +12,7 @@
+
+ #include <linux/clk.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+_
diff --git a/patches/media-stih-cec-add-missing-ioh.patch b/patches/media-stih-cec-add-missing-ioh.patch
new file mode 100644
index 000000000..05b92584f
--- /dev/null
+++ b/patches/media-stih-cec-add-missing-ioh.patch
@@ -0,0 +1,39 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Subject: media: stih-cec: add missing io.h
+Date: Tue, 23 Apr 2024 22:23:09 +0300
+
+In the driver the io.h is implied by others. This is not good as it
+prevents from cleanups done in other headers. Add missing include.
+
+Link: https://lkml.kernel.org/r/20240423192529.3249134-3-andriy.shevchenko@linux.intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Alain Volmat <alain.volmat@foss.st.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Chen-Yu Tsai <wens@csie.org>
+Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Jernej Skrabec <jernej.skrabec@gmail.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Patrice Chotard <patrice.chotard@foss.st.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Sean Young <sean@mess.org>
+Cc: Stefani Seibold <stefani@seibold.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ drivers/media/cec/platform/sti/stih-cec.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/media/cec/platform/sti/stih-cec.c~media-stih-cec-add-missing-ioh
++++ a/drivers/media/cec/platform/sti/stih-cec.c
+@@ -6,6 +6,7 @@
+ */
+ #include <linux/clk.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+_
diff --git a/patches/memcg-fix-data-race-kcsan-bug-in-rstats.patch b/patches/memcg-fix-data-race-kcsan-bug-in-rstats.patch
new file mode 100644
index 000000000..d463b9759
--- /dev/null
+++ b/patches/memcg-fix-data-race-kcsan-bug-in-rstats.patch
@@ -0,0 +1,97 @@
+From: Breno Leitao <leitao@debian.org>
+Subject: memcg: fix data-race KCSAN bug in rstats
+Date: Wed, 24 Apr 2024 05:59:39 -0700
+
+A data-race issue in memcg rstat occurs when two distinct code paths
+access the same 4-byte region concurrently. KCSAN detection triggers the
+following BUG as a result.
+
+ BUG: KCSAN: data-race in __count_memcg_events / mem_cgroup_css_rstat_flush
+
+ write to 0xffffe8ffff98e300 of 4 bytes by task 5274 on cpu 17:
+ mem_cgroup_css_rstat_flush (mm/memcontrol.c:5850)
+ cgroup_rstat_flush_locked (kernel/cgroup/rstat.c:243 (discriminator 7))
+ cgroup_rstat_flush (./include/linux/spinlock.h:401 kernel/cgroup/rstat.c:278)
+ mem_cgroup_flush_stats.part.0 (mm/memcontrol.c:767)
+ memory_numa_stat_show (mm/memcontrol.c:6911)
+<snip>
+
+ read to 0xffffe8ffff98e300 of 4 bytes by task 410848 on cpu 27:
+ __count_memcg_events (mm/memcontrol.c:725 mm/memcontrol.c:962)
+ count_memcg_event_mm.part.0 (./include/linux/memcontrol.h:1097 ./include/linux/memcontrol.h:1120)
+ handle_mm_fault (mm/memory.c:5483 mm/memory.c:5622)
+<snip>
+
+ value changed: 0x00000029 -> 0x00000000
+
+The race occurs because two code paths access the same "stats_updates"
+location. Although "stats_updates" is a per-CPU variable, it is remotely
+accessed by another CPU at
+cgroup_rstat_flush_locked()->mem_cgroup_css_rstat_flush(), leading to the
+data race mentioned.
+
+Considering that memcg_rstat_updated() is in the hot code path, adding a
+lock to protect it may not be desirable, especially since this variable
+pertains solely to statistics.
+
+Therefore, annotating accesses to stats_updates with READ/WRITE_ONCE() can
+prevent KCSAN splats and potential partial reads/writes.
+
+Link: https://lkml.kernel.org/r/20240424125940.2410718-1-leitao@debian.org
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Suggested-by: Shakeel Butt <shakeel.butt@linux.dev>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/memcontrol.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/mm/memcontrol.c~memcg-fix-data-race-kcsan-bug-in-rstats
++++ a/mm/memcontrol.c
+@@ -715,6 +715,7 @@ static inline void memcg_rstat_updated(s
+ {
+ struct memcg_vmstats_percpu *statc;
+ int cpu = smp_processor_id();
++ unsigned int stats_updates;
+
+ if (!val)
+ return;
+@@ -722,8 +723,9 @@ static inline void memcg_rstat_updated(s
+ cgroup_rstat_updated(memcg->css.cgroup, cpu);
+ statc = this_cpu_ptr(memcg->vmstats_percpu);
+ for (; statc; statc = statc->parent) {
+- statc->stats_updates += abs(val);
+- if (statc->stats_updates < MEMCG_CHARGE_BATCH)
++ stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
++ WRITE_ONCE(statc->stats_updates, stats_updates);
++ if (stats_updates < MEMCG_CHARGE_BATCH)
+ continue;
+
+ /*
+@@ -731,9 +733,9 @@ static inline void memcg_rstat_updated(s
+ * redundant. Avoid the overhead of the atomic update.
+ */
+ if (!memcg_vmstats_needs_flush(statc->vmstats))
+- atomic64_add(statc->stats_updates,
++ atomic64_add(stats_updates,
+ &statc->vmstats->stats_updates);
+- statc->stats_updates = 0;
++ WRITE_ONCE(statc->stats_updates, 0);
+ }
+ }
+
+@@ -5887,7 +5889,7 @@ static void mem_cgroup_css_rstat_flush(s
+ }
+ }
+ }
+- statc->stats_updates = 0;
++ WRITE_ONCE(statc->stats_updates, 0);
+ /* We are in a per-cpu loop here, only do the atomic write once */
+ if (atomic64_read(&memcg->vmstats->stats_updates))
+ atomic64_set(&memcg->vmstats->stats_updates, 0);
+_
diff --git a/patches/memory-failure-remove-calls-to-page_mapping.patch b/patches/memory-failure-remove-calls-to-page_mapping.patch
new file mode 100644
index 000000000..3a9575ddb
--- /dev/null
+++ b/patches/memory-failure-remove-calls-to-page_mapping.patch
@@ -0,0 +1,47 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: memory-failure: remove calls to page_mapping()
+Date: Tue, 23 Apr 2024 23:55:34 +0100
+
+This is mostly just inlining page_mapping() into the two callers.
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-4-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/memory-failure.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/memory-failure.c~memory-failure-remove-calls-to-page_mapping
++++ a/mm/memory-failure.c
+@@ -216,6 +216,7 @@ EXPORT_SYMBOL_GPL(hwpoison_filter_flags_
+
+ static int hwpoison_filter_dev(struct page *p)
+ {
++ struct folio *folio = page_folio(p);
+ struct address_space *mapping;
+ dev_t dev;
+
+@@ -223,7 +224,7 @@ static int hwpoison_filter_dev(struct pa
+ hwpoison_filter_dev_minor == ~0U)
+ return 0;
+
+- mapping = page_mapping(p);
++ mapping = folio_mapping(folio);
+ if (mapping == NULL || mapping->host == NULL)
+ return -EINVAL;
+
+@@ -1088,7 +1089,8 @@ out:
+ */
+ static int me_pagecache_dirty(struct page_state *ps, struct page *p)
+ {
+- struct address_space *mapping = page_mapping(p);
++ struct folio *folio = page_folio(p);
++ struct address_space *mapping = folio_mapping(folio);
+
+ SetPageError(p);
+ /* TBD: print more information about the file. */
+_
diff --git a/patches/migrate-expand-the-use-of-folio-in-__migrate_device_pages.patch b/patches/migrate-expand-the-use-of-folio-in-__migrate_device_pages.patch
new file mode 100644
index 000000000..3a92fd86d
--- /dev/null
+++ b/patches/migrate-expand-the-use-of-folio-in-__migrate_device_pages.patch
@@ -0,0 +1,60 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: migrate: expand the use of folio in __migrate_device_pages()
+Date: Tue, 23 Apr 2024 23:55:35 +0100
+
+Removes a few calls to compound_head() and a call to page_mapping().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-5-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/migrate_device.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/mm/migrate_device.c~migrate-expand-the-use-of-folio-in-__migrate_device_pages
++++ a/mm/migrate_device.c
+@@ -696,6 +696,7 @@ static void __migrate_device_pages(unsig
+ struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
+ struct page *page = migrate_pfn_to_page(src_pfns[i]);
+ struct address_space *mapping;
++ struct folio *folio;
+ int r;
+
+ if (!newpage) {
+@@ -730,15 +731,12 @@ static void __migrate_device_pages(unsig
+ continue;
+ }
+
+- mapping = page_mapping(page);
++ folio = page_folio(page);
++ mapping = folio_mapping(folio);
+
+ if (is_device_private_page(newpage) ||
+ is_device_coherent_page(newpage)) {
+ if (mapping) {
+- struct folio *folio;
+-
+- folio = page_folio(page);
+-
+ /*
+ * For now only support anonymous memory migrating to
+ * device private or coherent memory.
+@@ -761,11 +759,10 @@ static void __migrate_device_pages(unsig
+
+ if (migrate && migrate->fault_page == page)
+ r = migrate_folio_extra(mapping, page_folio(newpage),
+- page_folio(page),
+- MIGRATE_SYNC_NO_COPY, 1);
++ folio, MIGRATE_SYNC_NO_COPY, 1);
+ else
+ r = migrate_folio(mapping, page_folio(newpage),
+- page_folio(page), MIGRATE_SYNC_NO_COPY);
++ folio, MIGRATE_SYNC_NO_COPY);
+ if (r != MIGRATEPAGE_SUCCESS)
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
+ }
+_
diff --git a/patches/mm-add-kernel-doc-for-folio_mark_accessed.patch b/patches/mm-add-kernel-doc-for-folio_mark_accessed.patch
new file mode 100644
index 000000000..e346f20fe
--- /dev/null
+++ b/patches/mm-add-kernel-doc-for-folio_mark_accessed.patch
@@ -0,0 +1,44 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: add kernel-doc for folio_mark_accessed()
+Date: Wed, 24 Apr 2024 20:19:11 +0100
+
+Convert the existing documentation to kernel-doc and remove references to
+pages.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-7-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/swap.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/mm/swap.c~mm-add-kernel-doc-for-folio_mark_accessed
++++ a/mm/swap.c
+@@ -448,15 +448,18 @@ static void folio_inc_refs(struct folio
+ }
+ #endif /* CONFIG_LRU_GEN */
+
+-/*
+- * Mark a page as having seen activity.
++/**
++ * folio_mark_accessed - Mark a folio as having seen activity.
++ * @folio: The folio to mark.
+ *
+- * inactive,unreferenced -> inactive,referenced
+- * inactive,referenced -> active,unreferenced
+- * active,unreferenced -> active,referenced
++ * This function will perform one of the following transitions:
+ *
+- * When a newly allocated page is not yet visible, so safe for non-atomic ops,
+- * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
++ * * inactive,unreferenced -> inactive,referenced
++ * * inactive,referenced -> active,unreferenced
++ * * active,unreferenced -> active,referenced
++ *
++ * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
++ * __folio_set_referenced() may be substituted for folio_mark_accessed().
+ */
+ void folio_mark_accessed(struct folio *folio)
+ {
+_
diff --git a/patches/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.patch b/patches/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.patch
new file mode 100644
index 000000000..6b0ea807c
--- /dev/null
+++ b/patches/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.patch
@@ -0,0 +1,49 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm-allow-for-detecting-underflows-with-page_mapcount-again-fix
+Date: Wed, 24 Apr 2024 10:50:09 +0200
+
+Let's make page_mapcount() slightly more efficient by inlining the
+page_type_has_type() check.
+
+Link: https://lkml.kernel.org/r/1af4fd61-7926-47c8-be45-833c0dbec08b@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Chris Zankel <chris@zankel.net>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Richard Chang <richardycc@google.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/mm.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/include/linux/mm.h~mm-allow-for-detecting-underflows-with-page_mapcount-again-fix
++++ a/include/linux/mm.h
+@@ -1229,10 +1229,11 @@ static inline void page_mapcount_reset(s
+ */
+ static inline int page_mapcount(struct page *page)
+ {
+- int mapcount = atomic_read(&page->_mapcount);
++ int mapcount = atomic_read(&page->_mapcount) + 1;
+
+ /* Handle page_has_type() pages */
+- mapcount = page_type_has_type(mapcount) ? 0 : mapcount + 1;
++ if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
++ mapcount = 0;
+ if (unlikely(PageCompound(page)))
+ mapcount += folio_entire_mapcount(page_folio(page));
+
+_
diff --git a/patches/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.patch b/patches/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.patch
new file mode 100644
index 000000000..ca8af3419
--- /dev/null
+++ b/patches/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.patch
@@ -0,0 +1,120 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: convert put_devmap_managed_page_refs() to put_devmap_managed_folio_refs()
+Date: Wed, 24 Apr 2024 20:19:08 +0100
+
+All callers have a folio so we can remove this use of
+page_ref_sub_return().
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-4-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/mm.h | 12 ++++++------
+ mm/gup.c | 6 +++---
+ mm/memremap.c | 10 +++++-----
+ mm/swap.c | 2 +-
+ 4 files changed, 15 insertions(+), 15 deletions(-)
+
+--- a/include/linux/mm.h~mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs
++++ a/include/linux/mm.h
+@@ -1437,17 +1437,17 @@ vm_fault_t finish_fault(struct vm_fault
+ #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
+ DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
+
+-bool __put_devmap_managed_page_refs(struct page *page, int refs);
+-static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
++bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
++static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
+ {
+ if (!static_branch_unlikely(&devmap_managed_key))
+ return false;
+- if (!is_zone_device_page(page))
++ if (!folio_is_zone_device(folio))
+ return false;
+- return __put_devmap_managed_page_refs(page, refs);
++ return __put_devmap_managed_folio_refs(folio, refs);
+ }
+ #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
+-static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
++static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
+ {
+ return false;
+ }
+@@ -1571,7 +1571,7 @@ static inline void put_page(struct page
+ * For some devmap managed pages we need to catch refcount transition
+ * from 2 to 1:
+ */
+- if (put_devmap_managed_page_refs(&folio->page, 1))
++ if (put_devmap_managed_folio_refs(folio, 1))
+ return;
+ folio_put(folio);
+ }
+--- a/mm/gup.c~mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs
++++ a/mm/gup.c
+@@ -89,7 +89,7 @@ retry:
+ * belongs to this folio.
+ */
+ if (unlikely(page_folio(page) != folio)) {
+- if (!put_devmap_managed_page_refs(&folio->page, refs))
++ if (!put_devmap_managed_folio_refs(folio, refs))
+ folio_put_refs(folio, refs);
+ goto retry;
+ }
+@@ -156,7 +156,7 @@ struct folio *try_grab_folio(struct page
+ */
+ if (unlikely((flags & FOLL_LONGTERM) &&
+ !folio_is_longterm_pinnable(folio))) {
+- if (!put_devmap_managed_page_refs(&folio->page, refs))
++ if (!put_devmap_managed_folio_refs(folio, refs))
+ folio_put_refs(folio, refs);
+ return NULL;
+ }
+@@ -198,7 +198,7 @@ static void gup_put_folio(struct folio *
+ refs *= GUP_PIN_COUNTING_BIAS;
+ }
+
+- if (!put_devmap_managed_page_refs(&folio->page, refs))
++ if (!put_devmap_managed_folio_refs(folio, refs))
+ folio_put_refs(folio, refs);
+ }
+
+--- a/mm/memremap.c~mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs
++++ a/mm/memremap.c
+@@ -512,9 +512,9 @@ void zone_device_page_init(struct page *
+ EXPORT_SYMBOL_GPL(zone_device_page_init);
+
+ #ifdef CONFIG_FS_DAX
+-bool __put_devmap_managed_page_refs(struct page *page, int refs)
++bool __put_devmap_managed_folio_refs(struct folio *folio, int refs)
+ {
+- if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
++ if (folio->page.pgmap->type != MEMORY_DEVICE_FS_DAX)
+ return false;
+
+ /*
+@@ -522,9 +522,9 @@ bool __put_devmap_managed_page_refs(stru
+ * refcount is 1, then the page is free and the refcount is
+ * stable because nobody holds a reference on the page.
+ */
+- if (page_ref_sub_return(page, refs) == 1)
+- wake_up_var(&page->_refcount);
++ if (folio_ref_sub_return(folio, refs) == 1)
++ wake_up_var(&folio->_refcount);
+ return true;
+ }
+-EXPORT_SYMBOL(__put_devmap_managed_page_refs);
++EXPORT_SYMBOL(__put_devmap_managed_folio_refs);
+ #endif /* CONFIG_FS_DAX */
+--- a/mm/swap.c~mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs
++++ a/mm/swap.c
+@@ -981,7 +981,7 @@ void folios_put_refs(struct folio_batch
+ unlock_page_lruvec_irqrestore(lruvec, flags);
+ lruvec = NULL;
+ }
+- if (put_devmap_managed_page_refs(&folio->page, nr_refs))
++ if (put_devmap_managed_folio_refs(folio, nr_refs))
+ continue;
+ if (folio_ref_sub_and_test(folio, nr_refs))
+ free_zone_device_folio(folio);
+_
diff --git a/patches/mm-free-non-hugetlb-large-folios-in-a-batch-fix.patch b/patches/mm-free-non-hugetlb-large-folios-in-a-batch-fix.patch
new file mode 100644
index 000000000..df170cdcb
--- /dev/null
+++ b/patches/mm-free-non-hugetlb-large-folios-in-a-batch-fix.patch
@@ -0,0 +1,94 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm-free-non-hugetlb-large-folios-in-a-batch-fix
+Date: Wed, 24 Apr 2024 11:20:28 -0400
+
+On Fri, Apr 05, 2024 at 04:32:23PM +0100, Matthew Wilcox (Oracle) wrote:
+> free_unref_folios() can now handle non-hugetlb large folios, so keep
+> normal large folios in the batch. hugetlb folios still need to be
+> handled specially. I believe that folios freed using put_pages_list()
+> cannot be accounted to a memcg (or the small folios would trip the "page
+> still charged to cgroup" warning), but put an assertion in to check that.
+
+There's such user, iommu uses put_pages_list() to free IOMMU pgtables, and
+they can be memcg accounted; since 2023 iommu_map switched to use
+GFP_KERNEL_ACCOUNT.
+
+I hit below panic when testing my local branch over mm-everything when
+running some VFIO workloads.
+
+For this specific vfio use case, see 160912fc3d4a ("vfio/type1: account
+iommu allocations").
+
+I think we should remove the VM_BUG_ON_FOLIO() line, as the memcg will then
+be properly taken care of later in free_pages_prepare(). Fixup attached at
+the end that will fix this crash for me.
+
+[ 10.092411] kernel BUG at mm/swap.c:152!
+[ 10.092686] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+[ 10.093034] CPU: 3 PID: 634 Comm: vfio-pci-mmap-t Tainted: G W 6.9.0-rc4-peterx+ #2
+[ 10.093628] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+[ 10.094361] RIP: 0010:put_pages_list+0x12b/0x150
+[ 10.094675] Code: 6d 08 48 81 c4 00 01 00 00 5b 5d c3 cc cc cc cc 48 c7 c6 f0 fd 9f 82 e8 63 e8 03 00 0f 0b 48 c7 c6 48 00 a0 82 e8 55 e8 03 00 <0f> 0b 48 c7 c6 28 fe 9f 82 e8 47f
+[ 10.095896] RSP: 0018:ffffc9000221bc50 EFLAGS: 00010282
+[ 10.096242] RAX: 0000000000000038 RBX: ffffea00042695c0 RCX: 0000000000000000
+[ 10.096707] RDX: 0000000000000001 RSI: 0000000000000027 RDI: 00000000ffffffff
+[ 10.097177] RBP: ffffc9000221bd68 R08: 0000000000000000 R09: 0000000000000003
+[ 10.097642] R10: ffffc9000221bb08 R11: ffffffff8335db48 R12: ffff8881070172c0
+[ 10.098113] R13: ffff888102fd0000 R14: ffff888107017210 R15: ffff888110a6c7c0
+[ 10.098586] FS: 0000000000000000(0000) GS:ffff888276a00000(0000) knlGS:0000000000000000
+[ 10.099117] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 10.099494] CR2: 00007f1910000000 CR3: 000000000323c006 CR4: 0000000000770ef0
+[ 10.099972] PKRU: 55555554
+[ 10.100154] Call Trace:
+[ 10.100321] <TASK>
+[ 10.100466] ? die+0x32/0x80
+[ 10.100666] ? do_trap+0xd9/0x100
+[ 10.100897] ? put_pages_list+0x12b/0x150
+[ 10.101168] ? put_pages_list+0x12b/0x150
+[ 10.101434] ? do_error_trap+0x81/0x110
+[ 10.101688] ? put_pages_list+0x12b/0x150
+[ 10.101957] ? exc_invalid_op+0x4c/0x60
+[ 10.102216] ? put_pages_list+0x12b/0x150
+[ 10.102484] ? asm_exc_invalid_op+0x16/0x20
+[ 10.102771] ? put_pages_list+0x12b/0x150
+[ 10.103026] ? 0xffffffff81000000
+[ 10.103246] ? dma_pte_list_pagetables.isra.0+0x38/0xa0
+[ 10.103592] ? dma_pte_list_pagetables.isra.0+0x9b/0xa0
+[ 10.103933] ? dma_pte_clear_level+0x18c/0x1a0
+[ 10.104228] ? domain_unmap+0x65/0x130
+[ 10.104481] ? domain_unmap+0xe6/0x130
+[ 10.104735] domain_exit+0x47/0x80
+[ 10.104968] vfio_iommu_type1_detach_group+0x3f1/0x5f0
+[ 10.105308] ? vfio_group_detach_container+0x3c/0x1a0
+[ 10.105644] vfio_group_detach_container+0x60/0x1a0
+[ 10.105977] vfio_group_fops_release+0x46/0x80
+[ 10.106274] __fput+0x9a/0x2d0
+[ 10.106479] task_work_run+0x55/0x90
+[ 10.106717] do_exit+0x32f/0xb70
+[ 10.106945] ? _raw_spin_unlock_irq+0x24/0x50
+[ 10.107237] do_group_exit+0x32/0xa0
+[ 10.107481] __x64_sys_exit_group+0x14/0x20
+[ 10.107760] do_syscall_64+0x75/0x190
+[ 10.108007] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Link: https://lkml.kernel.org/r/ZikjPB0Dt5HA8-uL@x1n
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Alex Williamson <alex.williamson@redhat.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/swap.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/swap.c~mm-free-non-hugetlb-large-folios-in-a-batch-fix
++++ a/mm/swap.c
+@@ -162,7 +162,6 @@ void put_pages_list(struct list_head *pa
+ free_huge_folio(folio);
+ continue;
+ }
+- VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
+ /* LRU flag must be clear because it's passed using the lru */
+ if (folio_batch_add(&fbatch, folio) > 0)
+ continue;
+_
diff --git a/patches/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.patch b/patches/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.patch
new file mode 100644
index 000000000..8d8faeb27
--- /dev/null
+++ b/patches/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.patch
@@ -0,0 +1,217 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm/khugepaged: replace page_mapcount() check by folio_likely_mapped_shared()
+Date: Wed, 24 Apr 2024 14:26:30 +0200
+
+We want to limit the use of page_mapcount() to places where absolutely
+required, to prepare for kernel configs where we won't keep track of
+per-page mapcounts in large folios.
+
+khugepaged is one of the remaining "more challenging" page_mapcount()
+users, but we might be able to move away from page_mapcount() without
+resulting in a significant behavior change that would warrant
+special-casing based on kernel configs.
+
+In 2020, we first added support to khugepaged for collapsing COW-shared
+pages via commit 9445689f3b61 ("khugepaged: allow to collapse a page
+shared across fork"), followed by support for collapsing PTE-mapped THP in
+commit 5503fbf2b0b8 ("khugepaged: allow to collapse PTE-mapped compound
+pages") and limiting the memory waste via the "page_count() > 1" check in
+commit 71a2c112a0f6 ("khugepaged: introduce 'max_ptes_shared' tunable").
+
+As a default, khugepaged will allow up to half of the PTEs to map shared
+pages: where page_mapcount() > 1. MADV_COLLAPSE ignores the khugepaged
+setting.
+
+khugepaged does currently not care about swapcache page references, and
+does not check under folio lock: so in some corner cases the "shared vs.
+exclusive" detection might be a bit off, making us detect "exclusive" when
+it's actually "shared".
+
+Most of our anonymous folios in the system are usually exclusive. We
+frequently see sharing of anonymous folios for a short period of time,
+after which our short-lived subprocesses either quit or exec().
+
+There are some famous examples, though, where child processes exist for a
+long time, and where memory is COW-shared with a lot of processes
+(webservers, webbrowsers, sshd, ...) and COW-sharing is crucial for
+reducing the memory footprint. We don't want to suddenly change the
+behavior to result in a significant increase in memory waste.
+
+Interestingly, khugepaged will only collapse an anonymous THP if at least
+one PTE is writable. After fork(), that means that something (usually a
+page fault) populated at least a single exclusive anonymous THP in that
+PMD range.
+
+So ... what happens when we switch to "is this folio mapped shared"
+instead of "is this page mapped shared" by using
+folio_likely_mapped_shared()?
+
+For "not-COW-shared" folios, small folios and for THPs (large folios) that
+are completely mapped into at least one process, switching to
+folio_likely_mapped_shared() will not result in a change.
+
+We'll only see a change for COW-shared PTE-mapped THPs that are partially
+mapped into all involved processes.
+
+There are two cases to consider:
+
+(A) folio_likely_mapped_shared() returns "false" for a PTE-mapped THP
+
+ If the folio is detected as exclusive, and it actually is exclusive,
+ there is no change: page_mapcount() == 1. This is the common case
+ without fork() or with short-lived child processes.
+
+ folio_likely_mapped_shared() might currently still detect a folio as
+ exclusive although it is shared (false negatives): if the first page is
+ not mapped multiple times and if the average per-page mapcount is smaller
+ than 1, implying that (1) the folio is partially mapped and (2) if we are
+ responsible for many mapcounts by mapping many pages others can't
+ ("mostly exclusive") (3) if we are not responsible for many mapcounts by
+ mapping little pages ("mostly shared") it won't make a big impact on the
+ end result.
+
+ So while we might now detect a page as "exclusive" although it isn't,
+ it's not expected to make a big difference in common cases.
+
+(B) folio_likely_mapped_shared() returns "true" for a PTE-mapped THP
+
+ folio_likely_mapped_shared() will never detect a large anonymous folio
+ as shared although it is exclusive: there are no false positives.
+
+ If we detect a THP as shared, at least one page of the THP is mapped by
+ another process. It could well be that some pages are actually exclusive.
+ For example, our child processes could have unmapped/COW'ed some pages
+  such that they would now be exclusive to our process, which we now
+ would treat as still-shared.
+
+ Examples:
+ (1) Parent maps all pages of a THP, child maps some pages. We detect
+ all pages in the parent as shared although some are actually
+ exclusive.
+ (2) Parent maps all but some page of a THP, child maps the remainder.
+ We detect all pages of the THP that the parent maps as shared
+ although they are all exclusive.
+
+ In (1) we wouldn't collapse a THP right now already: no PTE
+ is writable, because a write fault would have resulted in COW of a
+ single page and the parent would no longer map all pages of that THP.
+
+ For (2) we would have collapsed a THP in the parent so far, now we
+ wouldn't as long as the child process is still alive: unless the child
+ process unmaps the remaining THP pages or we decide to split that THP.
+
+ Possibly, the child COW'ed many pages, meaning that it's likely that
+ we can populate a THP for our child first, and then for our parent.
+
+ For (2), we are making really bad use of the THP in the first
+ place (not even mapped completely in at least one process). If the
+ THP would be completely partially mapped, it would be on the deferred
+ split queue where we would split it lazily later.
+
+ For short-running child processes, we don't particularly care. For
+ long-running processes, the expectation is that such scenarios are
+ rather rare: further, a THP might be best placed if most data in the
+ PMD range is actually written, implying that we'll have to COW more
+ pages first before khugepaged would collapse it.
+
+To summarize, in the common case, this change is not expected to matter
+much. The more common application of khugepaged operates on exclusive
+pages, either before fork() or after a child quit.
+
+Can we improve (A)? Yes, if we implement more precise tracking of "mapped
+shared" vs. "mapped exclusively", we could get rid of the false negatives
+completely.
+
+Can we improve (B)? We could count how many pages of a large folio we map
+inside the current page table and detect that we are responsible for most
+of the folio mapcount and conclude "as good as exclusive", which might
+help in some cases. ... but likely, some other mechanism should detect
+that the THP is not a good use in the scenario (not even mapped completely
+in a single process) and try splitting that folio lazily etc.
+
+We'll move the folio_test_anon() check before our "shared" check, so we
+might get more expressive results for SCAN_EXCEED_SHARED_PTE: this order
+of checks now matches the one in __collapse_huge_page_isolate(). Extend
+documentation.
+
+Link: https://lkml.kernel.org/r/20240424122630.495788-1-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: Yang Shi <yang.shi@linux.alibaba.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ Documentation/admin-guide/mm/transhuge.rst | 3 +-
+ mm/khugepaged.c | 22 ++++++++++++-------
+ 2 files changed, 17 insertions(+), 8 deletions(-)
+
+--- a/Documentation/admin-guide/mm/transhuge.rst~mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared
++++ a/Documentation/admin-guide/mm/transhuge.rst
+@@ -278,7 +278,8 @@ collapsed, resulting fewer pages being c
+ THPs, and lower memory access performance.
+
+ ``max_ptes_shared`` specifies how many pages can be shared across multiple
+-processes. Exceeding the number would block the collapse::
++processes. khugepaged might treat pages of THPs as shared if any page of
++that THP is shared. Exceeding the number would block the collapse::
+
+ /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_shared
+
+--- a/mm/khugepaged.c~mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared
++++ a/mm/khugepaged.c
+@@ -583,7 +583,8 @@ static int __collapse_huge_page_isolate(
+ folio = page_folio(page);
+ VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
+
+- if (page_mapcount(page) > 1) {
++ /* See hpage_collapse_scan_pmd(). */
++ if (folio_likely_mapped_shared(folio)) {
+ ++shared;
+ if (cc->is_khugepaged &&
+ shared > khugepaged_max_ptes_shared) {
+@@ -1317,8 +1318,20 @@ static int hpage_collapse_scan_pmd(struc
+ result = SCAN_PAGE_NULL;
+ goto out_unmap;
+ }
++ folio = page_folio(page);
+
+- if (page_mapcount(page) > 1) {
++ if (!folio_test_anon(folio)) {
++ result = SCAN_PAGE_ANON;
++ goto out_unmap;
++ }
++
++ /*
++ * We treat a single page as shared if any part of the THP
++ * is shared. "False negatives" from
++ * folio_likely_mapped_shared() are not expected to matter
++ * much in practice.
++ */
++ if (folio_likely_mapped_shared(folio)) {
+ ++shared;
+ if (cc->is_khugepaged &&
+ shared > khugepaged_max_ptes_shared) {
+@@ -1328,7 +1341,6 @@ static int hpage_collapse_scan_pmd(struc
+ }
+ }
+
+- folio = page_folio(page);
+ /*
+ * Record which node the original page is from and save this
+ * information to cc->node_load[].
+@@ -1349,10 +1361,6 @@ static int hpage_collapse_scan_pmd(struc
+ result = SCAN_PAGE_LOCK;
+ goto out_unmap;
+ }
+- if (!folio_test_anon(folio)) {
+- result = SCAN_PAGE_ANON;
+- goto out_unmap;
+- }
+
+ /*
+ * Check if the page has any GUP (or other external) pins.
+_
diff --git a/patches/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.patch b/patches/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.patch
new file mode 100644
index 000000000..206764e04
--- /dev/null
+++ b/patches/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.patch
@@ -0,0 +1,49 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm-make-folio_mapcount-return-0-for-small-typed-folios-fix
+Date: Wed, 24 Apr 2024 10:56:17 +0200
+
+Just like page_mapcount(), let's make folio_mapcount() slightly more
+efficient.
+
+Link: https://lkml.kernel.org/r/c30fcda1-ed87-46f5-8297-cdedbddac009@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Chris Zankel <chris@zankel.net>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Richard Chang <richardycc@google.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/mm.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/include/linux/mm.h~mm-make-folio_mapcount-return-0-for-small-typed-folios-fix
++++ a/include/linux/mm.h
+@@ -1270,8 +1270,11 @@ static inline int folio_mapcount(const s
+ int mapcount;
+
+ if (likely(!folio_test_large(folio))) {
+- mapcount = atomic_read(&folio->_mapcount);
+- return page_type_has_type(mapcount) ? 0 : mapcount + 1;
++ mapcount = atomic_read(&folio->_mapcount) + 1;
++ /* Handle page_has_type() pages */
++ if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
++ mapcount = 0;
++ return mapcount;
+ }
+ return folio_large_mapcount(folio);
+ }
+_
diff --git a/patches/mm-page_owner-fixing-wrong-information-in-dump_page_owner.patch b/patches/mm-page_owner-fixing-wrong-information-in-dump_page_owner.patch
new file mode 100644
index 000000000..c37870a03
--- /dev/null
+++ b/patches/mm-page_owner-fixing-wrong-information-in-dump_page_owner.patch
@@ -0,0 +1,43 @@
+From: Maninder Singh <maninder1.s@samsung.com>
+Subject: mm: page_owner: fix wrong information in dump_page_owner
+Date: Wed, 24 Apr 2024 16:48:37 +0530
+
+With commit ea4b5b33bf8a ("mm,page_owner: update metadata for tail
+pages"), new API __update_page_owner_handle was introduced and argument
+was passed in wrong order from __set_page_owner and thus page_owner is
+giving wrong data.
+
+[ 15.982420] page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL), pid 80, tgid -1210279584 (insmod), ts 80, free_ts 0
+
+Fixing the same.
+Correct output:
+[ 14.556482] page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL), pid 80, tgid 80 (insmod), ts 14552004992, free_ts 0
+
+Link: https://lkml.kernel.org/r/20240424111838.3782931-1-hariom1.p@samsung.com
+Fixes: ea4b5b33bf8a ("mm,page_owner: update metadata for tail pages")
+Signed-off-by: Maninder Singh <maninder1.s@samsung.com>
+Signed-off-by: Hariom Panthi <hariom1.p@samsung.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Rohit Thapliyal <r.thapliyal@samsung.com>
+Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/page_owner.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_owner.c~mm-page_owner-fixing-wrong-information-in-dump_page_owner
++++ a/mm/page_owner.c
+@@ -328,7 +328,7 @@ noinline void __set_page_owner(struct pa
+ if (unlikely(!page_ext))
+ return;
+ __update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
+- current->pid, current->tgid, ts_nsec,
++ ts_nsec, current->pid, current->tgid,
+ current->comm);
+ page_ext_put(page_ext);
+ inc_stack_record_count(handle, gfp_mask, 1 << order);
+_
diff --git a/patches/mm-remove-page_cache_alloc.patch b/patches/mm-remove-page_cache_alloc.patch
new file mode 100644
index 000000000..ac65d874a
--- /dev/null
+++ b/patches/mm-remove-page_cache_alloc.patch
@@ -0,0 +1,37 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove page_cache_alloc()
+Date: Wed, 24 Apr 2024 20:19:06 +0100
+
+Patch series "More folio compat code removal".
+
+More code removal with bonus kernel-doc addition.
+
+
+This patch (of 7):
+
+All callers have now been converted to filemap_alloc_folio().
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-1-willy@infradead.org
+Link: https://lkml.kernel.org/r/20240424191914.361554-2-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/pagemap.h | 5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/include/linux/pagemap.h~mm-remove-page_cache_alloc
++++ a/include/linux/pagemap.h
+@@ -558,11 +558,6 @@ static inline struct page *__page_cache_
+ return &filemap_alloc_folio(gfp, 0)->page;
+ }
+
+-static inline struct page *page_cache_alloc(struct address_space *x)
+-{
+- return __page_cache_alloc(mapping_gfp_mask(x));
+-}
+-
+ static inline gfp_t readahead_gfp_mask(struct address_space *x)
+ {
+ return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
+_
diff --git a/patches/mm-remove-page_mapping.patch b/patches/mm-remove-page_mapping.patch
new file mode 100644
index 000000000..114d3937e
--- /dev/null
+++ b/patches/mm-remove-page_mapping.patch
@@ -0,0 +1,44 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove page_mapping()
+Date: Tue, 23 Apr 2024 23:55:37 +0100
+
+All callers are now converted, delete this compatibility wrapper.
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-7-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/pagemap.h | 1 -
+ mm/folio-compat.c | 6 ------
+ 2 files changed, 7 deletions(-)
+
+--- a/include/linux/pagemap.h~mm-remove-page_mapping
++++ a/include/linux/pagemap.h
+@@ -399,7 +399,6 @@ static inline void filemap_nr_thps_dec(s
+ #endif
+ }
+
+-struct address_space *page_mapping(struct page *);
+ struct address_space *folio_mapping(struct folio *);
+ struct address_space *swapcache_mapping(struct folio *);
+
+--- a/mm/folio-compat.c~mm-remove-page_mapping
++++ a/mm/folio-compat.c
+@@ -10,12 +10,6 @@
+ #include <linux/swap.h>
+ #include "internal.h"
+
+-struct address_space *page_mapping(struct page *page)
+-{
+- return folio_mapping(page_folio(page));
+-}
+-EXPORT_SYMBOL(page_mapping);
+-
+ void unlock_page(struct page *page)
+ {
+ return folio_unlock(page_folio(page));
+_
diff --git a/patches/mm-remove-page_ref_sub_return.patch b/patches/mm-remove-page_ref_sub_return.patch
new file mode 100644
index 000000000..395a40625
--- /dev/null
+++ b/patches/mm-remove-page_ref_sub_return.patch
@@ -0,0 +1,42 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove page_ref_sub_return()
+Date: Wed, 24 Apr 2024 20:19:09 +0100
+
+With all callers converted to folios, we can act directly on
+folio->_refcount.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-5-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/page_ref.h | 11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/include/linux/page_ref.h~mm-remove-page_ref_sub_return
++++ a/include/linux/page_ref.h
+@@ -139,20 +139,15 @@ static inline void folio_ref_sub(struct
+ page_ref_sub(&folio->page, nr);
+ }
+
+-static inline int page_ref_sub_return(struct page *page, int nr)
++static inline int folio_ref_sub_return(struct folio *folio, int nr)
+ {
+- int ret = atomic_sub_return(nr, &page->_refcount);
++ int ret = atomic_sub_return(nr, &folio->_refcount);
+
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
+- __page_ref_mod_and_return(page, -nr, ret);
++ __page_ref_mod_and_return(&folio->page, -nr, ret);
+ return ret;
+ }
+
+-static inline int folio_ref_sub_return(struct folio *folio, int nr)
+-{
+- return page_ref_sub_return(&folio->page, nr);
+-}
+-
+ static inline void page_ref_inc(struct page *page)
+ {
+ atomic_inc(&page->_refcount);
+_
diff --git a/patches/mm-remove-pagereferenced.patch b/patches/mm-remove-pagereferenced.patch
new file mode 100644
index 000000000..5dbe969e5
--- /dev/null
+++ b/patches/mm-remove-pagereferenced.patch
@@ -0,0 +1,31 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove PageReferenced
+Date: Wed, 24 Apr 2024 20:19:12 +0100
+
+All callers now use folio_*_referenced() so we can remove the
+PageReferenced family of functions.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-8-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/page-flags.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/page-flags.h~mm-remove-pagereferenced
++++ a/include/linux/page-flags.h
+@@ -512,9 +512,9 @@ static inline int TestClearPage##uname(s
+ __PAGEFLAG(Locked, locked, PF_NO_TAIL)
+ FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
+ PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
+-PAGEFLAG(Referenced, referenced, PF_HEAD)
+- TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
+- __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
++FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
++ FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
++ __FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
+ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
+ __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
+ PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
+_
diff --git a/patches/mm-remove-put_devmap_managed_page.patch b/patches/mm-remove-put_devmap_managed_page.patch
new file mode 100644
index 000000000..f18cf8f85
--- /dev/null
+++ b/patches/mm-remove-put_devmap_managed_page.patch
@@ -0,0 +1,39 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove put_devmap_managed_page()
+Date: Wed, 24 Apr 2024 20:19:07 +0100
+
+It only has one caller; convert that caller to use
+put_devmap_managed_page_refs() instead.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-3-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ include/linux/mm.h | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/include/linux/mm.h~mm-remove-put_devmap_managed_page
++++ a/include/linux/mm.h
+@@ -1453,11 +1453,6 @@ static inline bool put_devmap_managed_pa
+ }
+ #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
+
+-static inline bool put_devmap_managed_page(struct page *page)
+-{
+- return put_devmap_managed_page_refs(page, 1);
+-}
+-
+ /* 127: arbitrary random number, small enough to assemble well */
+ #define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
+@@ -1576,7 +1571,7 @@ static inline void put_page(struct page
+ * For some devmap managed pages we need to catch refcount transition
+ * from 2 to 1:
+ */
+- if (put_devmap_managed_page(&folio->page))
++ if (put_devmap_managed_page_refs(&folio->page, 1))
+ return;
+ folio_put(folio);
+ }
+_
diff --git a/patches/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.patch b/patches/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.patch
new file mode 100644
index 000000000..3c3ff3fbc
--- /dev/null
+++ b/patches/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.patch
@@ -0,0 +1,85 @@
+From: Hariom Panthi <hariom1.p@samsung.com>
+Subject: mm: vmalloc: dump page owner info if page is already mapped
+Date: Wed, 24 Apr 2024 16:48:38 +0530
+
+In vmap_pte_range, BUG_ON is called when a page is already mapped.
+It doesn't give enough information to debug further.
+Dumping page owner information along with BUG_ON will be more useful
+in case of multiple page mappings.
+
+Example:
+[ 14.552875] page: refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x10b923
+[ 14.553440] flags: 0xbffff0000000000(node=0|zone=2|lastcpupid=0x3ffff)
+[ 14.554001] page_type: 0xffffffff()
+[ 14.554783] raw: 0bffff0000000000 0000000000000000 dead000000000122 0000000000000000
+[ 14.555230] raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000
+[ 14.555768] page dumped because: remapping already mapped page
+[ 14.556172] page_owner tracks the page as allocated
+[ 14.556482] page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL), pid 80, tgid 80 (insmod), ts 14552004992, free_ts 0
+[ 14.557286] prep_new_page+0xa8/0x10c
+[ 14.558052] get_page_from_freelist+0x7f8/0x1248
+[ 14.558298] __alloc_pages+0x164/0x2b4
+[ 14.558514] alloc_pages_mpol+0x88/0x230
+[ 14.558904] alloc_pages+0x4c/0x7c
+[ 14.559157] load_module+0x74/0x1af4
+[ 14.559361] __do_sys_init_module+0x190/0x1fc
+[ 14.559615] __arm64_sys_init_module+0x1c/0x28
+[ 14.559883] invoke_syscall+0x44/0x108
+[ 14.560109] el0_svc_common.constprop.0+0x40/0xe0
+[ 14.560371] do_el0_svc_compat+0x1c/0x34
+[ 14.560600] el0_svc_compat+0x2c/0x80
+[ 14.560820] el0t_32_sync_handler+0x90/0x140
+[ 14.561040] el0t_32_sync+0x194/0x198
+[ 14.561329] page_owner free stack trace missing
+[ 14.562049] ------------[ cut here ]------------
+[ 14.562314] kernel BUG at mm/vmalloc.c:113!
+
+Link: https://lkml.kernel.org/r/20240424111838.3782931-2-hariom1.p@samsung.com
+Signed-off-by: Hariom Panthi <hariom1.p@samsung.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Maninder Singh <maninder1.s@samsung.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Rohit Thapliyal <r.thapliyal@samsung.com>
+Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/vmalloc.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/mm/vmalloc.c~mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped
++++ a/mm/vmalloc.c
+@@ -42,6 +42,7 @@
+ #include <linux/sched/mm.h>
+ #include <asm/tlbflush.h>
+ #include <asm/shmparam.h>
++#include <linux/page_owner.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/vmalloc.h>
+@@ -96,6 +97,7 @@ static int vmap_pte_range(pmd_t *pmd, un
+ {
+ pte_t *pte;
+ u64 pfn;
++ struct page *page;
+ unsigned long size = PAGE_SIZE;
+
+ pfn = phys_addr >> PAGE_SHIFT;
+@@ -103,7 +105,13 @@ static int vmap_pte_range(pmd_t *pmd, un
+ if (!pte)
+ return -ENOMEM;
+ do {
+- BUG_ON(!pte_none(ptep_get(pte)));
++ if (!pte_none(ptep_get(pte))) {
++ if (pfn_valid(pfn)) {
++ page = pfn_to_page(pfn);
++ dump_page(page, "remapping already mapped page");
++ }
++ BUG();
++ }
+
+ #ifdef CONFIG_HUGETLB_PAGE
+ size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
+_
diff --git a/patches/mseal-add-mseal-syscall-fix.patch b/patches/mseal-add-mseal-syscall-fix.patch
new file mode 100644
index 000000000..b1b7346b7
--- /dev/null
+++ b/patches/mseal-add-mseal-syscall-fix.patch
@@ -0,0 +1,129 @@
+From: Jeff Xu <jeffxu@chromium.org>
+Subject: mseal: add branch prediction hint
+Date: Tue, 23 Apr 2024 19:28:25 +0000
+
+It is unlikely that an application calls an mm syscall, such as
+mprotect(), on already sealed mappings, so add a branch prediction hint.
+
+Link: https://lkml.kernel.org/r/20240423192825.1273679-2-jeffxu@chromium.org
+Signed-off-by: Jeff Xu <jeffxu@chromium.org>
+Suggested-by: Pedro Falcato <pedro.falcato@gmail.com>
+Cc: Amer Al Shanawany <amer.shanawany@gmail.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Guenter Roeck <groeck@chromium.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Cc: Jeff Xu <jeffxu@google.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Jorge Lucangeli Obes <jorgelo@chromium.org>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Stephen Röttger <sroettger@google.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/madvise.c | 2 +-
+ mm/mmap.c | 4 ++--
+ mm/mprotect.c | 2 +-
+ mm/mremap.c | 4 ++--
+ mm/mseal.c | 6 +++---
+ 5 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/mm/madvise.c~mseal-add-mseal-syscall-fix
++++ a/mm/madvise.c
+@@ -1436,7 +1436,7 @@ int do_madvise(struct mm_struct *mm, uns
+ * Check if the address range is sealed for do_madvise().
+ * can_modify_mm_madv assumes we have acquired the lock on MM.
+ */
+- if (!can_modify_mm_madv(mm, start, end, behavior)) {
++ if (unlikely(!can_modify_mm_madv(mm, start, end, behavior))) {
+ error = -EPERM;
+ goto out;
+ }
+--- a/mm/mmap.c~mseal-add-mseal-syscall-fix
++++ a/mm/mmap.c
+@@ -2740,7 +2740,7 @@ int do_vmi_munmap(struct vma_iterator *v
+ * Prevent unmapping a sealed VMA.
+ * can_modify_mm assumes we have acquired the lock on MM.
+ */
+- if (!can_modify_mm(mm, start, end))
++ if (unlikely(!can_modify_mm(mm, start, end)))
+ return -EPERM;
+
+ /* arch_unmap() might do unmaps itself. */
+@@ -3163,7 +3163,7 @@ int do_vma_munmap(struct vma_iterator *v
+ * Prevent unmapping a sealed VMA.
+ * can_modify_mm assumes we have acquired the lock on MM.
+ */
+- if (!can_modify_mm(mm, start, end))
++ if (unlikely(!can_modify_mm(mm, start, end)))
+ return -EPERM;
+
+ arch_unmap(mm, start, end);
+--- a/mm/mprotect.c~mseal-add-mseal-syscall-fix
++++ a/mm/mprotect.c
+@@ -749,7 +749,7 @@ static int do_mprotect_pkey(unsigned lon
+ * checking if memory is sealed.
+ * can_modify_mm assumes we have acquired the lock on MM.
+ */
+- if (!can_modify_mm(current->mm, start, end)) {
++ if (unlikely(!can_modify_mm(current->mm, start, end))) {
+ error = -EPERM;
+ goto out;
+ }
+--- a/mm/mremap.c~mseal-add-mseal-syscall-fix
++++ a/mm/mremap.c
+@@ -912,7 +912,7 @@ static unsigned long mremap_to(unsigned
+ *
+ * can_modify_mm assumes we have acquired the lock on MM.
+ */
+- if (!can_modify_mm(mm, addr, addr + old_len))
++ if (unlikely(!can_modify_mm(mm, addr, addr + old_len)))
+ return -EPERM;
+
+ if (flags & MREMAP_FIXED) {
+@@ -1087,7 +1087,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
+ * Place can_modify_mm here so we can keep the logic related to
+ * shrink/expand together.
+ */
+- if (!can_modify_mm(mm, addr, addr + old_len)) {
++ if (unlikely(!can_modify_mm(mm, addr, addr + old_len))) {
+ ret = -EPERM;
+ goto out;
+ }
+--- a/mm/mseal.c~mseal-add-mseal-syscall-fix
++++ a/mm/mseal.c
+@@ -32,7 +32,7 @@ static inline void set_vma_sealed(struct
+ */
+ static bool can_modify_vma(struct vm_area_struct *vma)
+ {
+- if (vma_is_sealed(vma))
++ if (unlikely(vma_is_sealed(vma)))
+ return false;
+
+ return true;
+@@ -75,7 +75,7 @@ bool can_modify_mm(struct mm_struct *mm,
+
+ /* going through each vma to check. */
+ for_each_vma_range(vmi, vma, end) {
+- if (!can_modify_vma(vma))
++ if (unlikely(!can_modify_vma(vma)))
+ return false;
+ }
+
+@@ -100,7 +100,7 @@ bool can_modify_mm_madv(struct mm_struct
+
+ /* going through each vma to check. */
+ for_each_vma_range(vmi, vma, end)
+- if (is_ro_anon(vma) && !can_modify_vma(vma))
++ if (unlikely(is_ro_anon(vma) && !can_modify_vma(vma)))
+ return false;
+
+ /* Allow by default. */
+_
diff --git a/patches/nilfs2-convert-to-use-the-new-mount-api.patch b/patches/nilfs2-convert-to-use-the-new-mount-api.patch
new file mode 100644
index 000000000..7ee0ab2c1
--- /dev/null
+++ b/patches/nilfs2-convert-to-use-the-new-mount-api.patch
@@ -0,0 +1,615 @@
+From: Eric Sandeen <sandeen@redhat.com>
+Subject: nilfs2: convert to use the new mount API
+Date: Thu, 25 Apr 2024 03:27:16 +0900
+
+Convert nilfs2 to use the new mount API.
+
+[konishi.ryusuke: fixed missing SB_RDONLY flag repair in nilfs_reconfigure]
+Link: https://lkml.kernel.org/r/33d078a7-9072-4d8e-a3a9-dec23d4191da@redhat.com
+Link: https://lkml.kernel.org/r/20240424182716.6024-1-konishi.ryusuke@gmail.com
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ fs/nilfs2/nilfs.h | 4
+ fs/nilfs2/super.c | 374 +++++++++++++++++-----------------------
+ fs/nilfs2/the_nilfs.c | 4
+ fs/nilfs2/the_nilfs.h | 6
+ 4 files changed, 164 insertions(+), 224 deletions(-)
+
+--- a/fs/nilfs2/nilfs.h~nilfs2-convert-to-use-the-new-mount-api
++++ a/fs/nilfs2/nilfs.h
+@@ -335,8 +335,8 @@ void __nilfs_error(struct super_block *s
+
+ extern struct nilfs_super_block *
+ nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
+-extern int nilfs_store_magic_and_option(struct super_block *,
+- struct nilfs_super_block *, char *);
++extern int nilfs_store_magic(struct super_block *sb,
++ struct nilfs_super_block *sbp);
+ extern int nilfs_check_feature_compatibility(struct super_block *,
+ struct nilfs_super_block *);
+ extern void nilfs_set_log_cursor(struct nilfs_super_block *,
+--- a/fs/nilfs2/super.c~nilfs2-convert-to-use-the-new-mount-api
++++ a/fs/nilfs2/super.c
+@@ -29,7 +29,8 @@
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/blkdev.h>
+-#include <linux/parser.h>
++#include <linux/fs_context.h>
++#include <linux/fs_parser.h>
+ #include <linux/crc32.h>
+ #include <linux/vfs.h>
+ #include <linux/writeback.h>
+@@ -61,7 +62,6 @@ struct kmem_cache *nilfs_segbuf_cachep;
+ struct kmem_cache *nilfs_btree_path_cache;
+
+ static int nilfs_setup_super(struct super_block *sb, int is_mount);
+-static int nilfs_remount(struct super_block *sb, int *flags, char *data);
+
+ void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
+ {
+@@ -702,105 +702,98 @@ static const struct super_operations nil
+ .freeze_fs = nilfs_freeze,
+ .unfreeze_fs = nilfs_unfreeze,
+ .statfs = nilfs_statfs,
+- .remount_fs = nilfs_remount,
+ .show_options = nilfs_show_options
+ };
+
+ enum {
+- Opt_err_cont, Opt_err_panic, Opt_err_ro,
+- Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
+- Opt_discard, Opt_nodiscard, Opt_err,
++ Opt_err, Opt_barrier, Opt_snapshot, Opt_order, Opt_norecovery,
++ Opt_discard,
+ };
+
+-static match_table_t tokens = {
+- {Opt_err_cont, "errors=continue"},
+- {Opt_err_panic, "errors=panic"},
+- {Opt_err_ro, "errors=remount-ro"},
+- {Opt_barrier, "barrier"},
+- {Opt_nobarrier, "nobarrier"},
+- {Opt_snapshot, "cp=%u"},
+- {Opt_order, "order=%s"},
+- {Opt_norecovery, "norecovery"},
+- {Opt_discard, "discard"},
+- {Opt_nodiscard, "nodiscard"},
+- {Opt_err, NULL}
++static const struct constant_table nilfs_param_err[] = {
++ {"continue", NILFS_MOUNT_ERRORS_CONT},
++ {"panic", NILFS_MOUNT_ERRORS_PANIC},
++ {"remount-ro", NILFS_MOUNT_ERRORS_RO},
++ {}
+ };
+
+-static int parse_options(char *options, struct super_block *sb, int is_remount)
+-{
+- struct the_nilfs *nilfs = sb->s_fs_info;
+- char *p;
+- substring_t args[MAX_OPT_ARGS];
+-
+- if (!options)
+- return 1;
+-
+- while ((p = strsep(&options, ",")) != NULL) {
+- int token;
++static const struct fs_parameter_spec nilfs_param_spec[] = {
++ fsparam_enum ("errors", Opt_err, nilfs_param_err),
++ fsparam_flag_no ("barrier", Opt_barrier),
++ fsparam_u64 ("cp", Opt_snapshot),
++ fsparam_string ("order", Opt_order),
++ fsparam_flag ("norecovery", Opt_norecovery),
++ fsparam_flag_no ("discard", Opt_discard),
++ {}
++};
+
+- if (!*p)
+- continue;
++struct nilfs_fs_context {
++ unsigned long ns_mount_opt;
++ __u64 cno;
++};
+
+- token = match_token(p, tokens, args);
+- switch (token) {
+- case Opt_barrier:
+- nilfs_set_opt(nilfs, BARRIER);
+- break;
+- case Opt_nobarrier:
++static int nilfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
++{
++ struct nilfs_fs_context *nilfs = fc->fs_private;
++ int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
++ struct fs_parse_result result;
++ int opt;
++
++ opt = fs_parse(fc, nilfs_param_spec, param, &result);
++ if (opt < 0)
++ return opt;
++
++ switch (opt) {
++ case Opt_barrier:
++ if (result.negated)
+ nilfs_clear_opt(nilfs, BARRIER);
+- break;
+- case Opt_order:
+- if (strcmp(args[0].from, "relaxed") == 0)
+- /* Ordered data semantics */
+- nilfs_clear_opt(nilfs, STRICT_ORDER);
+- else if (strcmp(args[0].from, "strict") == 0)
+- /* Strict in-order semantics */
+- nilfs_set_opt(nilfs, STRICT_ORDER);
+- else
+- return 0;
+- break;
+- case Opt_err_panic:
+- nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC);
+- break;
+- case Opt_err_ro:
+- nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO);
+- break;
+- case Opt_err_cont:
+- nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT);
+- break;
+- case Opt_snapshot:
+- if (is_remount) {
+- nilfs_err(sb,
+- "\"%s\" option is invalid for remount",
+- p);
+- return 0;
+- }
+- break;
+- case Opt_norecovery:
+- nilfs_set_opt(nilfs, NORECOVERY);
+- break;
+- case Opt_discard:
+- nilfs_set_opt(nilfs, DISCARD);
+- break;
+- case Opt_nodiscard:
+- nilfs_clear_opt(nilfs, DISCARD);
+- break;
+- default:
+- nilfs_err(sb, "unrecognized mount option \"%s\"", p);
+- return 0;
++ else
++ nilfs_set_opt(nilfs, BARRIER);
++ break;
++ case Opt_order:
++ if (strcmp(param->string, "relaxed") == 0)
++ /* Ordered data semantics */
++ nilfs_clear_opt(nilfs, STRICT_ORDER);
++ else if (strcmp(param->string, "strict") == 0)
++ /* Strict in-order semantics */
++ nilfs_set_opt(nilfs, STRICT_ORDER);
++ else
++ return -EINVAL;
++ break;
++ case Opt_err:
++ nilfs->ns_mount_opt &= ~NILFS_MOUNT_ERROR_MODE;
++ nilfs->ns_mount_opt |= result.uint_32;
++ break;
++ case Opt_snapshot:
++ if (is_remount) {
++ struct super_block *sb = fc->root->d_sb;
++
++ nilfs_err(sb,
++ "\"%s\" option is invalid for remount",
++ param->key);
++ return -EINVAL;
++ }
++ if (result.uint_64 == 0) {
++ nilfs_err(NULL,
++ "invalid option \"cp=0\": invalid checkpoint number 0");
++ return -EINVAL;
+ }
++ nilfs->cno = result.uint_64;
++ break;
++ case Opt_norecovery:
++ nilfs_set_opt(nilfs, NORECOVERY);
++ break;
++ case Opt_discard:
++ if (result.negated)
++ nilfs_clear_opt(nilfs, DISCARD);
++ else
++ nilfs_set_opt(nilfs, DISCARD);
++ break;
++ default:
++ return -EINVAL;
+ }
+- return 1;
+-}
+
+-static inline void
+-nilfs_set_default_options(struct super_block *sb,
+- struct nilfs_super_block *sbp)
+-{
+- struct the_nilfs *nilfs = sb->s_fs_info;
+-
+- nilfs->ns_mount_opt =
+- NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
++ return 0;
+ }
+
+ static int nilfs_setup_super(struct super_block *sb, int is_mount)
+@@ -857,9 +850,8 @@ struct nilfs_super_block *nilfs_read_sup
+ return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
+ }
+
+-int nilfs_store_magic_and_option(struct super_block *sb,
+- struct nilfs_super_block *sbp,
+- char *data)
++int nilfs_store_magic(struct super_block *sb,
++ struct nilfs_super_block *sbp)
+ {
+ struct the_nilfs *nilfs = sb->s_fs_info;
+
+@@ -870,14 +862,12 @@ int nilfs_store_magic_and_option(struct
+ sb->s_flags |= SB_NOATIME;
+ #endif
+
+- nilfs_set_default_options(sb, sbp);
+-
+ nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
+ nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
+ nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
+ nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);
+
+- return !parse_options(data, sb, 0) ? -EINVAL : 0;
++ return 0;
+ }
+
+ int nilfs_check_feature_compatibility(struct super_block *sb,
+@@ -1042,10 +1032,11 @@ int nilfs_checkpoint_is_mounted(struct s
+ * So, the recovery process is protected from other simultaneous mounts.
+ */
+ static int
+-nilfs_fill_super(struct super_block *sb, void *data, int silent)
++nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ {
+ struct the_nilfs *nilfs;
+ struct nilfs_root *fsroot;
++ struct nilfs_fs_context *ctx = fc->fs_private;
+ __u64 cno;
+ int err;
+
+@@ -1055,10 +1046,13 @@ nilfs_fill_super(struct super_block *sb,
+
+ sb->s_fs_info = nilfs;
+
+- err = init_nilfs(nilfs, sb, (char *)data);
++ err = init_nilfs(nilfs, sb);
+ if (err)
+ goto failed_nilfs;
+
++ /* Copy in parsed mount options */
++ nilfs->ns_mount_opt = ctx->ns_mount_opt;
++
+ sb->s_op = &nilfs_sops;
+ sb->s_export_op = &nilfs_export_ops;
+ sb->s_root = NULL;
+@@ -1117,34 +1111,25 @@ nilfs_fill_super(struct super_block *sb,
+ return err;
+ }
+
+-static int nilfs_remount(struct super_block *sb, int *flags, char *data)
++static int nilfs_reconfigure(struct fs_context *fc)
+ {
++ struct nilfs_fs_context *ctx = fc->fs_private;
++ struct super_block *sb = fc->root->d_sb;
+ struct the_nilfs *nilfs = sb->s_fs_info;
+- unsigned long old_sb_flags;
+- unsigned long old_mount_opt;
+ int err;
+
+ sync_filesystem(sb);
+- old_sb_flags = sb->s_flags;
+- old_mount_opt = nilfs->ns_mount_opt;
+-
+- if (!parse_options(data, sb, 1)) {
+- err = -EINVAL;
+- goto restore_opts;
+- }
+- sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
+
+ err = -EINVAL;
+
+ if (!nilfs_valid_fs(nilfs)) {
+ nilfs_warn(sb,
+ "couldn't remount because the filesystem is in an incomplete recovery state");
+- goto restore_opts;
++ goto ignore_opts;
+ }
+-
+- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
++ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
+ goto out;
+- if (*flags & SB_RDONLY) {
++ if (fc->sb_flags & SB_RDONLY) {
+ sb->s_flags |= SB_RDONLY;
+
+ /*
+@@ -1172,138 +1157,66 @@ static int nilfs_remount(struct super_bl
+ "couldn't remount RDWR because of unsupported optional features (%llx)",
+ (unsigned long long)features);
+ err = -EROFS;
+- goto restore_opts;
++ goto ignore_opts;
+ }
+
+ sb->s_flags &= ~SB_RDONLY;
+
+ root = NILFS_I(d_inode(sb->s_root))->i_root;
+ err = nilfs_attach_log_writer(sb, root);
+- if (err)
+- goto restore_opts;
++ if (err) {
++ sb->s_flags |= SB_RDONLY;
++ goto ignore_opts;
++ }
+
+ down_write(&nilfs->ns_sem);
+ nilfs_setup_super(sb, true);
+ up_write(&nilfs->ns_sem);
+ }
+ out:
+- return 0;
+-
+- restore_opts:
+- sb->s_flags = old_sb_flags;
+- nilfs->ns_mount_opt = old_mount_opt;
+- return err;
+-}
+-
+-struct nilfs_super_data {
+- __u64 cno;
+- int flags;
+-};
+-
+-static int nilfs_parse_snapshot_option(const char *option,
+- const substring_t *arg,
+- struct nilfs_super_data *sd)
+-{
+- unsigned long long val;
+- const char *msg = NULL;
+- int err;
+-
+- if (!(sd->flags & SB_RDONLY)) {
+- msg = "read-only option is not specified";
+- goto parse_error;
+- }
+-
+- err = kstrtoull(arg->from, 0, &val);
+- if (err) {
+- if (err == -ERANGE)
+- msg = "too large checkpoint number";
+- else
+- msg = "malformed argument";
+- goto parse_error;
+- } else if (val == 0) {
+- msg = "invalid checkpoint number 0";
+- goto parse_error;
+- }
+- sd->cno = val;
+- return 0;
+-
+-parse_error:
+- nilfs_err(NULL, "invalid option \"%s\": %s", option, msg);
+- return 1;
+-}
+-
+-/**
+- * nilfs_identify - pre-read mount options needed to identify mount instance
+- * @data: mount options
+- * @sd: nilfs_super_data
+- */
+-static int nilfs_identify(char *data, struct nilfs_super_data *sd)
+-{
+- char *p, *options = data;
+- substring_t args[MAX_OPT_ARGS];
+- int token;
+- int ret = 0;
+-
+- do {
+- p = strsep(&options, ",");
+- if (p != NULL && *p) {
+- token = match_token(p, tokens, args);
+- if (token == Opt_snapshot)
+- ret = nilfs_parse_snapshot_option(p, &args[0],
+- sd);
+- }
+- if (!options)
+- break;
+- BUG_ON(options == data);
+- *(options - 1) = ',';
+- } while (!ret);
+- return ret;
+-}
++ sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
++ /* Copy over parsed remount options */
++ nilfs->ns_mount_opt = ctx->ns_mount_opt;
+
+-static int nilfs_set_bdev_super(struct super_block *s, void *data)
+-{
+- s->s_dev = *(dev_t *)data;
+ return 0;
+-}
+
+-static int nilfs_test_bdev_super(struct super_block *s, void *data)
+-{
+- return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
++ ignore_opts:
++ return err;
+ }
+
+-static struct dentry *
+-nilfs_mount(struct file_system_type *fs_type, int flags,
+- const char *dev_name, void *data)
++static int
++nilfs_get_tree(struct fs_context *fc)
+ {
+- struct nilfs_super_data sd = { .flags = flags };
++ struct nilfs_fs_context *ctx = fc->fs_private;
+ struct super_block *s;
+ dev_t dev;
+ int err;
+
+- if (nilfs_identify(data, &sd))
+- return ERR_PTR(-EINVAL);
++ if (ctx->cno && !(fc->sb_flags & SB_RDONLY)) {
++ nilfs_err(s, "invalid option \"cp=%llu\": read-only option is not specified",
++ ctx->cno);
++ return -EINVAL;
++ }
+
+- err = lookup_bdev(dev_name, &dev);
++ err = lookup_bdev(fc->source, &dev);
+ if (err)
+- return ERR_PTR(err);
++ return err;
+
+- s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
+- &dev);
++ s = sget_dev(fc, dev);
+ if (IS_ERR(s))
+- return ERR_CAST(s);
++ return PTR_ERR(s);
+
+ if (!s->s_root) {
+- err = setup_bdev_super(s, flags, NULL);
++ err = setup_bdev_super(s, fc->sb_flags, fc);
+ if (!err)
+- err = nilfs_fill_super(s, data,
+- flags & SB_SILENT ? 1 : 0);
++ err = nilfs_fill_super(s, fc);
+ if (err)
+ goto failed_super;
+
+ s->s_flags |= SB_ACTIVE;
+- } else if (!sd.cno) {
++ } else if (!ctx->cno) {
+ if (nilfs_tree_is_busy(s->s_root)) {
+- if ((flags ^ s->s_flags) & SB_RDONLY) {
++ if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
+ nilfs_err(s,
+ "the device already has a %s mount.",
+ sb_rdonly(s) ? "read-only" : "read/write");
+@@ -1315,34 +1228,65 @@ nilfs_mount(struct file_system_type *fs_
+ * Try remount to setup mount states if the current
+ * tree is not mounted and only snapshots use this sb.
+ */
+- err = nilfs_remount(s, &flags, data);
++ fc->root = s->s_root;
++ err = nilfs_reconfigure(fc);
+ if (err)
+ goto failed_super;
+ }
+ }
+
+- if (sd.cno) {
++ if (ctx->cno) {
+ struct dentry *root_dentry;
+
+- err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
++ err = nilfs_attach_snapshot(s, ctx->cno, &root_dentry);
+ if (err)
+ goto failed_super;
+- return root_dentry;
++ fc->root = root_dentry;
++ return 0;
+ }
+
+- return dget(s->s_root);
++ fc->root = dget(s->s_root);
++ return 0;
+
+ failed_super:
+ deactivate_locked_super(s);
+- return ERR_PTR(err);
++ return err;
++}
++
++static void nilfs_free_fc(struct fs_context *fc)
++{
++ kfree(fc->fs_private);
++}
++
++static const struct fs_context_operations nilfs_context_ops = {
++ .parse_param = nilfs_parse_param,
++ .get_tree = nilfs_get_tree,
++ .reconfigure = nilfs_reconfigure,
++ .free = nilfs_free_fc,
++};
++
++static int nilfs_init_fs_context(struct fs_context *fc)
++{
++ struct nilfs_fs_context *ctx;
++
++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++
++ ctx->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
++ fc->fs_private = ctx;
++ fc->ops = &nilfs_context_ops;
++
++ return 0;
+ }
+
+ struct file_system_type nilfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nilfs2",
+- .mount = nilfs_mount,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
++ .init_fs_context = nilfs_init_fs_context,
++ .parameters = nilfs_param_spec,
+ };
+ MODULE_ALIAS_FS("nilfs2");
+
+--- a/fs/nilfs2/the_nilfs.c~nilfs2-convert-to-use-the-new-mount-api
++++ a/fs/nilfs2/the_nilfs.c
+@@ -668,7 +668,7 @@ static int nilfs_load_super_block(struct
+ * Return Value: On success, 0 is returned. On error, a negative error
+ * code is returned.
+ */
+-int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
++int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ {
+ struct nilfs_super_block *sbp;
+ int blocksize;
+@@ -686,7 +686,7 @@ int init_nilfs(struct the_nilfs *nilfs,
+ if (err)
+ goto out;
+
+- err = nilfs_store_magic_and_option(sb, sbp, data);
++ err = nilfs_store_magic(sb, sbp);
+ if (err)
+ goto failed_sbh;
+
+--- a/fs/nilfs2/the_nilfs.h~nilfs2-convert-to-use-the-new-mount-api
++++ a/fs/nilfs2/the_nilfs.h
+@@ -219,10 +219,6 @@ THE_NILFS_FNS(PURGING, purging)
+ #define nilfs_set_opt(nilfs, opt) \
+ ((nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt)
+ #define nilfs_test_opt(nilfs, opt) ((nilfs)->ns_mount_opt & NILFS_MOUNT_##opt)
+-#define nilfs_write_opt(nilfs, mask, opt) \
+- ((nilfs)->ns_mount_opt = \
+- (((nilfs)->ns_mount_opt & ~NILFS_MOUNT_##mask) | \
+- NILFS_MOUNT_##opt)) \
+
+ /**
+ * struct nilfs_root - nilfs root object
+@@ -276,7 +272,7 @@ static inline int nilfs_sb_will_flip(str
+ void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
+ struct the_nilfs *alloc_nilfs(struct super_block *sb);
+ void destroy_nilfs(struct the_nilfs *nilfs);
+-int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
++int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
+ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
+ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs);
+ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs);
+_
diff --git a/patches/ocfs2-remove-redundant-assignment-to-variable-status.patch b/patches/ocfs2-remove-redundant-assignment-to-variable-status.patch
new file mode 100644
index 000000000..5b32c8683
--- /dev/null
+++ b/patches/ocfs2-remove-redundant-assignment-to-variable-status.patch
@@ -0,0 +1,40 @@
+From: Colin Ian King <colin.i.king@gmail.com>
+Subject: ocfs2: remove redundant assignment to variable status
+Date: Tue, 23 Apr 2024 23:30:18 +0100
+
+Variable status is being assigned an error code that is never read; it is
+being assigned inside of a do-while loop. The assignment is redundant and
+can be removed.
+
+Cleans up clang scan build warning:
+fs/ocfs2/dlm/dlmdomain.c:1530:2: warning: Value stored to 'status' is never
+read [deadcode.DeadStores]
+
+Link: https://lkml.kernel.org/r/20240423223018.1573213-1-colin.i.king@gmail.com
+Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Heming Zhao <heming.zhao@suse.com>
+Cc: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ fs/ocfs2/dlm/dlmdomain.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/ocfs2/dlm/dlmdomain.c~ocfs2-remove-redundant-assignment-to-variable-status
++++ a/fs/ocfs2/dlm/dlmdomain.c
+@@ -1527,7 +1527,6 @@ static void dlm_send_join_asserts(struct
+ {
+ int status, node, live;
+
+- status = 0;
+ node = -1;
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+_
diff --git a/patches/mm-update-shuffle-documentation-to-match-its-current-state.patch b/patches/old/mm-update-shuffle-documentation-to-match-its-current-state.patch
index 23d65b0b2..23d65b0b2 100644
--- a/patches/mm-update-shuffle-documentation-to-match-its-current-state.patch
+++ b/patches/old/mm-update-shuffle-documentation-to-match-its-current-state.patch
diff --git a/patches/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch b/patches/old/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch
index d0f58867f..d0f58867f 100644
--- a/patches/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch
+++ b/patches/old/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch
diff --git a/patches/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch b/patches/old/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch
index 977979721..977979721 100644
--- a/patches/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch
+++ b/patches/old/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch
diff --git a/patches/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch b/patches/old/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch
index d15fa8449..d15fa8449 100644
--- a/patches/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch
+++ b/patches/old/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch
diff --git a/patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch b/patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch
index a2729b493..a2729b493 100644
--- a/patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch
+++ b/patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch
diff --git a/patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch b/patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch
index ebbb4509e..ebbb4509e 100644
--- a/patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch
+++ b/patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch
diff --git a/patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch b/patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch
index 16ff52f5d..16ff52f5d 100644
--- a/patches/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch
+++ b/patches/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch
diff --git a/patches/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.patch b/patches/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.patch
new file mode 100644
index 000000000..a5d3b5aa8
--- /dev/null
+++ b/patches/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.patch
@@ -0,0 +1,28 @@
+From: Ryan Roberts <ryan.roberts@arm.com>
+Subject: selftests/mm: soft-dirty should fail if a testcase fails
+Date: Wed, 24 Apr 2024 11:53:01 +0100
+
+Previously soft-dirty was unconditionally exiting with success, even if
+one of its testcases failed. Let's fix that so that failure can be
+reported to automated systems properly.
+
+Link: https://lkml.kernel.org/r/20240424105301.3157695-1-ryan.roberts@arm.com
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ tools/testing/selftests/mm/soft-dirty.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/mm/soft-dirty.c~selftests-mm-soft-dirty-should-fail-if-a-testcase-fails
++++ a/tools/testing/selftests/mm/soft-dirty.c
+@@ -209,5 +209,5 @@ int main(int argc, char **argv)
+
+ close(pagemap_fd);
+
+- return ksft_exit_pass();
++ ksft_finished();
+ }
+_
diff --git a/patches/tools-fix-userspace-compilation-with-new-test_xarray-changes.patch b/patches/tools-fix-userspace-compilation-with-new-test_xarray-changes.patch
new file mode 100644
index 000000000..b6b26af1d
--- /dev/null
+++ b/patches/tools-fix-userspace-compilation-with-new-test_xarray-changes.patch
@@ -0,0 +1,51 @@
+From: Luis Chamberlain <mcgrof@kernel.org>
+Subject: tools: fix userspace compilation with new test_xarray changes
+Date: Tue, 23 Apr 2024 12:22:20 -0700
+
+Patch series "test_xarray: couple of fixes for v6-9-rc6", v2.
+
+Here are a couple of fixes which should be merged into the queue for
+v6.9-rc6. The first one was reported by Liam, after fixing that I noticed
+an issue with a test, and a fix for that is in the second patch.
+
+
+This patch (of 2):
+
+Liam reported that compiling the test_xarray on userspace was broken. I
+was not even aware that was possible, but you can, and you can run these
+tests in userspace with:
+
+make -C tools/testing/radix-tree
+./tools/testing/radix-tree/xarray
+
+Add the two helpers we need to fix compilation. We don't need a userspace
+schedule() so just make it do nothing.
+
+Link: https://lkml.kernel.org/r/20240423192221.301095-1-mcgrof@kernel.org
+Link: https://lkml.kernel.org/r/20240423192221.301095-2-mcgrof@kernel.org
+Fixes: a60cc288a1a2 ("test_xarray: add tests for advanced multi-index use")
+Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
+Reported-by: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Cc: Daniel Gomez <da.gomez@samsung.com>
+Cc: Darrick J. Wong <djwong@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Pankaj Raghav <p.raghav@samsung.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ tools/testing/radix-tree/linux/kernel.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/tools/testing/radix-tree/linux/kernel.h~tools-fix-userspace-compilation-with-new-test_xarray-changes
++++ a/tools/testing/radix-tree/linux/kernel.h
+@@ -18,6 +18,8 @@
+ #define pr_info printk
+ #define pr_debug printk
+ #define pr_cont printk
++#define schedule()
++#define PAGE_SHIFT 12
+
+ #define __acquires(x)
+ #define __releases(x)
+_
diff --git a/patches/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.patch b/patches/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.patch
new file mode 100644
index 000000000..ad2a2e657
--- /dev/null
+++ b/patches/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.patch
@@ -0,0 +1,63 @@
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Subject: tools lib rbtree: Pick some improvements from the kernel rbtree code
+Date: Tue, 23 Apr 2024 17:27:57 -0300
+
+The tools/lib/rbtree.c code came from the kernel. Remove the
+EXPORT_SYMBOL() that make sense only there. Unfortunately it is not being
+checked with tools/perf/check_headers.sh. Will try to remedy this. Until
+then pick the improvements from:
+
+ b0687c1119b4e8c8 ("lib/rbtree: use '+' instead of '|' for setting color.")
+
+That I noticed by doing:
+
+ diff -u tools/lib/rbtree.c lib/rbtree.c
+ diff -u tools/include/linux/rbtree_augmented.h include/linux/rbtree_augmented.h
+
+There is one other case, but let's pick it up in separate patches.
+
+Link: https://lkml.kernel.org/r/ZigZzeFoukzRKG1Q@x1
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Noah Goldstein <goldstein.w.n@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ tools/include/linux/rbtree_augmented.h | 4 ++--
+ tools/lib/rbtree.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/tools/include/linux/rbtree_augmented.h~tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code
++++ a/tools/include/linux/rbtree_augmented.h
+@@ -158,13 +158,13 @@ RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,
+
+ static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+ {
+- rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
++ rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
+ }
+
+ static inline void rb_set_parent_color(struct rb_node *rb,
+ struct rb_node *p, int color)
+ {
+- rb->__rb_parent_color = (unsigned long)p | color;
++ rb->__rb_parent_color = (unsigned long)p + color;
+ }
+
+ static inline void
+--- a/tools/lib/rbtree.c~tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code
++++ a/tools/lib/rbtree.c
+@@ -58,7 +58,7 @@
+
+ static inline void rb_set_black(struct rb_node *rb)
+ {
+- rb->__rb_parent_color |= RB_BLACK;
++ rb->__rb_parent_color += RB_BLACK;
+ }
+
+ static inline struct rb_node *rb_red_parent(struct rb_node *red)
+_
diff --git a/patches/userfault-expand-folio-use-in-mfill_atomic_install_pte.patch b/patches/userfault-expand-folio-use-in-mfill_atomic_install_pte.patch
new file mode 100644
index 000000000..0c9ccea45
--- /dev/null
+++ b/patches/userfault-expand-folio-use-in-mfill_atomic_install_pte.patch
@@ -0,0 +1,41 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: userfault; expand folio use in mfill_atomic_install_pte()
+Date: Tue, 23 Apr 2024 23:55:36 +0100
+
+Call page_folio() a little earlier so we can use folio_mapping()
+instead of page_mapping(), saving a call to compound_head().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-6-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/userfaultfd.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/mm/userfaultfd.c~userfault-expand-folio-use-in-mfill_atomic_install_pte
++++ a/mm/userfaultfd.c
+@@ -180,9 +180,9 @@ int mfill_atomic_install_pte(pmd_t *dst_
+ pte_t _dst_pte, *dst_pte;
+ bool writable = dst_vma->vm_flags & VM_WRITE;
+ bool vm_shared = dst_vma->vm_flags & VM_SHARED;
+- bool page_in_cache = page_mapping(page);
+ spinlock_t *ptl;
+- struct folio *folio;
++ struct folio *folio = page_folio(page);
++ bool page_in_cache = folio_mapping(folio);
+
+ _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
+ _dst_pte = pte_mkdirty(_dst_pte);
+@@ -212,7 +212,6 @@ int mfill_atomic_install_pte(pmd_t *dst_
+ if (!pte_none_mostly(ptep_get(dst_pte)))
+ goto out_unlock;
+
+- folio = page_folio(page);
+ if (page_in_cache) {
+ /* Usually, cache pages are already added to LRU */
+ if (newly_allocated)
+_
diff --git a/pc/devel-series b/pc/devel-series
index 756e52dbd..df84216da 100644
--- a/pc/devel-series
+++ b/pc/devel-series
@@ -134,6 +134,13 @@ mm-userfaultfd-reset-ptes-when-close-for-wr-protected-ones.patch
#
maple_tree-fix-mas_empty_area_rev-null-pointer-dereference.patch
#
+mm-page_owner-fixing-wrong-information-in-dump_page_owner.patch
+#
+maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.patch
+#
+tools-fix-userspace-compilation-with-new-test_xarray-changes.patch
+lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.patch
+#
### hfe
#
#ENDBRANCH mm-hotfixes-unstable
@@ -549,6 +556,7 @@ mm-hugetlb-pass-correct-order_per_bit-to-cma_declare_contiguous_nid.patch
mm-convert-pagecache_isize_extended-to-use-a-folio.patch
#
mm-free-non-hugetlb-large-folios-in-a-batch.patch
+mm-free-non-hugetlb-large-folios-in-a-batch-fix.patch
mm-combine-free_the_page-and-free_unref_page.patch
mm-inline-destroy_large_folio-into-__folio_put_large.patch
mm-combine-__folio_put_small-__folio_put_large-and-__folio_put.patch
@@ -573,11 +581,13 @@ mm-pass-vma-instead-of-mm-to-follow_pte.patch
mm-follow_pte-improvements.patch
#
mm-allow-for-detecting-underflows-with-page_mapcount-again.patch
+mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.patch
mm-rmap-always-inline-anon-file-rmap-duplication-of-a-single-pte.patch
mm-rmap-add-fast-path-for-small-folios-when-adding-removing-duplicating.patch
mm-track-mapcount-of-large-folios-in-single-value.patch
mm-improve-folio_likely_mapped_shared-using-the-mapcount-of-large-folios.patch
mm-make-folio_mapcount-return-0-for-small-typed-folios.patch
+mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.patch
mm-memory-use-folio_mapcount-in-zap_present_folio_ptes.patch
mm-huge_memory-use-folio_mapcount-in-zap_huge_pmd-sanity-check.patch
mm-memory-failure-use-folio_mapcount-in-hwpoison_user_mappings.patch
@@ -629,6 +639,7 @@ mm-page_alloc-allowing-mthp-compaction-to-capture-the-freed-page-directly.patch
#mseal-wire-up-mseal-syscall.patch: https://lkml.kernel.org/r/CAJuCfpFLwJg4n7wPpT+u9vC4XHoLE_BPPZ0tDKf7W45hGky4_Q@mail.gmail.com
mseal-wire-up-mseal-syscall.patch
mseal-add-mseal-syscall.patch
+mseal-add-mseal-syscall-fix.patch
selftest-mm-mseal-memory-sealing.patch
mseal-add-documentation.patch
selftest-mm-mseal-read-only-elf-memory-segment.patch
@@ -678,16 +689,9 @@ mm-arm64-override-clear_young_dirty_ptes-batch-helper.patch
mm-memory-add-any_dirty-optional-pointer-to-folio_pte_batch.patch
mm-madvise-optimize-lazyfreeing-with-mthp-in-madvise_free.patch
#
-writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.patch
-#writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch: TBU
-writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.patch
-writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.patch
-writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.patch
-writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.patch
-writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.patch
#
-mm-update-shuffle-documentation-to-match-its-current-state.patch
#
+#mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.patch: check review
mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.patch
#
mm-page-flags-make-pageuptodate-return-bool.patch
@@ -699,6 +703,30 @@ memcg-simple-cleanup-of-stats-update-functions.patch
xarray-use-bits_per_longs.patch
xarray-dont-use-proxy-headers.patch
#
+fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.patch
+f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.patch
+memory-failure-remove-calls-to-page_mapping.patch
+migrate-expand-the-use-of-folio-in-__migrate_device_pages.patch
+userfault-expand-folio-use-in-mfill_atomic_install_pte.patch
+mm-remove-page_mapping.patch
+#
+mm-remove-page_cache_alloc.patch
+mm-remove-put_devmap_managed_page.patch
+mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.patch
+mm-remove-page_ref_sub_return.patch
+gup-use-folios-for-gup_devmap.patch
+mm-add-kernel-doc-for-folio_mark_accessed.patch
+mm-remove-pagereferenced.patch
+#
+memcg-fix-data-race-kcsan-bug-in-rstats.patch
+#
+mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.patch
+#
+mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.patch
+#
+selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.patch
+#
+#
#
#
#
@@ -858,5 +886,14 @@ crash-add-prefix-for-crash-dumping-messages.patch
binfmt_elf_fdpic-fix-proc-pid-auxv.patch
binfmt_elf_fdpic-fix-proc-pid-auxv-checkpatch-fixes.patch
#
+nilfs2-convert-to-use-the-new-mount-api.patch
+#
+ocfs2-remove-redundant-assignment-to-variable-status.patch
+#
+tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.patch
+#
+media-rc-add-missing-ioh.patch
+media-stih-cec-add-missing-ioh.patch
+kfifo-dont-use-proxy-headers.patch
#
#ENDBRANCH mm-nonmm-unstable
diff --git a/pc/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.pc b/pc/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.pc
new file mode 100644
index 000000000..6cb8861f8
--- /dev/null
+++ b/pc/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.pc
@@ -0,0 +1 @@
+fs/f2fs/data.c
diff --git a/pc/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.pc b/pc/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.pc
new file mode 100644
index 000000000..593cf155b
--- /dev/null
+++ b/pc/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.pc
@@ -0,0 +1 @@
+fs/crypto/inline_crypt.c
diff --git a/pc/gup-use-folios-for-gup_devmap.pc b/pc/gup-use-folios-for-gup_devmap.pc
new file mode 100644
index 000000000..efad84764
--- /dev/null
+++ b/pc/gup-use-folios-for-gup_devmap.pc
@@ -0,0 +1 @@
+mm/gup.c
diff --git a/pc/kfifo-dont-use-proxy-headers.pc b/pc/kfifo-dont-use-proxy-headers.pc
new file mode 100644
index 000000000..ba16564f0
--- /dev/null
+++ b/pc/kfifo-dont-use-proxy-headers.pc
@@ -0,0 +1,3 @@
+include/linux/kfifo.h
+lib/kfifo.c
+samples/kfifo/dma-example.c
diff --git a/pc/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.pc b/pc/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.pc
new file mode 100644
index 000000000..ff56fc76e
--- /dev/null
+++ b/pc/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.pc
@@ -0,0 +1 @@
+lib/test_xarray.c
diff --git a/pc/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.pc b/pc/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.pc
new file mode 100644
index 000000000..a04c4ff68
--- /dev/null
+++ b/pc/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.pc
@@ -0,0 +1 @@
+MAINTAINERS
diff --git a/pc/media-rc-add-missing-ioh.pc b/pc/media-rc-add-missing-ioh.pc
new file mode 100644
index 000000000..6beb314fa
--- /dev/null
+++ b/pc/media-rc-add-missing-ioh.pc
@@ -0,0 +1,4 @@
+drivers/media/rc/mtk-cir.c
+drivers/media/rc/serial_ir.c
+drivers/media/rc/st_rc.c
+drivers/media/rc/sunxi-cir.c
diff --git a/pc/media-stih-cec-add-missing-ioh.pc b/pc/media-stih-cec-add-missing-ioh.pc
new file mode 100644
index 000000000..c5dba41c5
--- /dev/null
+++ b/pc/media-stih-cec-add-missing-ioh.pc
@@ -0,0 +1 @@
+drivers/media/cec/platform/sti/stih-cec.c
diff --git a/pc/memcg-fix-data-race-kcsan-bug-in-rstats.pc b/pc/memcg-fix-data-race-kcsan-bug-in-rstats.pc
new file mode 100644
index 000000000..ba4010b8e
--- /dev/null
+++ b/pc/memcg-fix-data-race-kcsan-bug-in-rstats.pc
@@ -0,0 +1 @@
+mm/memcontrol.c
diff --git a/pc/memory-failure-remove-calls-to-page_mapping.pc b/pc/memory-failure-remove-calls-to-page_mapping.pc
new file mode 100644
index 000000000..709648673
--- /dev/null
+++ b/pc/memory-failure-remove-calls-to-page_mapping.pc
@@ -0,0 +1 @@
+mm/memory-failure.c
diff --git a/pc/migrate-expand-the-use-of-folio-in-__migrate_device_pages.pc b/pc/migrate-expand-the-use-of-folio-in-__migrate_device_pages.pc
new file mode 100644
index 000000000..fc348dde7
--- /dev/null
+++ b/pc/migrate-expand-the-use-of-folio-in-__migrate_device_pages.pc
@@ -0,0 +1 @@
+mm/migrate_device.c
diff --git a/pc/mm-add-kernel-doc-for-folio_mark_accessed.pc b/pc/mm-add-kernel-doc-for-folio_mark_accessed.pc
new file mode 100644
index 000000000..edb691fab
--- /dev/null
+++ b/pc/mm-add-kernel-doc-for-folio_mark_accessed.pc
@@ -0,0 +1 @@
+mm/swap.c
diff --git a/pc/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.pc b/pc/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.pc
new file mode 100644
index 000000000..476581c1d
--- /dev/null
+++ b/pc/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.pc
@@ -0,0 +1 @@
+include/linux/mm.h
diff --git a/pc/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.pc b/pc/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.pc
new file mode 100644
index 000000000..2e0c87333
--- /dev/null
+++ b/pc/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.pc
@@ -0,0 +1,4 @@
+include/linux/mm.h
+mm/gup.c
+mm/memremap.c
+mm/swap.c
diff --git a/pc/mm-free-non-hugetlb-large-folios-in-a-batch-fix.pc b/pc/mm-free-non-hugetlb-large-folios-in-a-batch-fix.pc
new file mode 100644
index 000000000..edb691fab
--- /dev/null
+++ b/pc/mm-free-non-hugetlb-large-folios-in-a-batch-fix.pc
@@ -0,0 +1 @@
+mm/swap.c
diff --git a/pc/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.pc b/pc/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.pc
new file mode 100644
index 000000000..9f6dc515c
--- /dev/null
+++ b/pc/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.pc
@@ -0,0 +1,2 @@
+Documentation/admin-guide/mm/transhuge.rst
+mm/khugepaged.c
diff --git a/pc/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.pc b/pc/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.pc
new file mode 100644
index 000000000..476581c1d
--- /dev/null
+++ b/pc/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.pc
@@ -0,0 +1 @@
+include/linux/mm.h
diff --git a/pc/mm-page_owner-fixing-wrong-information-in-dump_page_owner.pc b/pc/mm-page_owner-fixing-wrong-information-in-dump_page_owner.pc
new file mode 100644
index 000000000..89fe6a5de
--- /dev/null
+++ b/pc/mm-page_owner-fixing-wrong-information-in-dump_page_owner.pc
@@ -0,0 +1 @@
+mm/page_owner.c
diff --git a/pc/mm-remove-page_cache_alloc.pc b/pc/mm-remove-page_cache_alloc.pc
new file mode 100644
index 000000000..310bb983f
--- /dev/null
+++ b/pc/mm-remove-page_cache_alloc.pc
@@ -0,0 +1 @@
+include/linux/pagemap.h
diff --git a/pc/mm-remove-page_mapping.pc b/pc/mm-remove-page_mapping.pc
new file mode 100644
index 000000000..2df142d85
--- /dev/null
+++ b/pc/mm-remove-page_mapping.pc
@@ -0,0 +1,2 @@
+include/linux/pagemap.h
+mm/folio-compat.c
diff --git a/pc/mm-remove-page_ref_sub_return.pc b/pc/mm-remove-page_ref_sub_return.pc
new file mode 100644
index 000000000..aa927269b
--- /dev/null
+++ b/pc/mm-remove-page_ref_sub_return.pc
@@ -0,0 +1 @@
+include/linux/page_ref.h
diff --git a/pc/mm-remove-pagereferenced.pc b/pc/mm-remove-pagereferenced.pc
new file mode 100644
index 000000000..0eafe3b40
--- /dev/null
+++ b/pc/mm-remove-pagereferenced.pc
@@ -0,0 +1 @@
+include/linux/page-flags.h
diff --git a/pc/mm-remove-put_devmap_managed_page.pc b/pc/mm-remove-put_devmap_managed_page.pc
new file mode 100644
index 000000000..476581c1d
--- /dev/null
+++ b/pc/mm-remove-put_devmap_managed_page.pc
@@ -0,0 +1 @@
+include/linux/mm.h
diff --git a/pc/mm-update-shuffle-documentation-to-match-its-current-state.pc b/pc/mm-update-shuffle-documentation-to-match-its-current-state.pc
deleted file mode 100644
index f41f12ec9..000000000
--- a/pc/mm-update-shuffle-documentation-to-match-its-current-state.pc
+++ /dev/null
@@ -1,2 +0,0 @@
-Documentation/admin-guide/kernel-parameters.txt
-mm/Kconfig
diff --git a/pc/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.pc b/pc/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.pc
new file mode 100644
index 000000000..ba2d8ce46
--- /dev/null
+++ b/pc/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.pc
@@ -0,0 +1 @@
+mm/vmalloc.c
diff --git a/pc/mseal-add-mseal-syscall-fix.pc b/pc/mseal-add-mseal-syscall-fix.pc
new file mode 100644
index 000000000..ff3efac21
--- /dev/null
+++ b/pc/mseal-add-mseal-syscall-fix.pc
@@ -0,0 +1,5 @@
+mm/madvise.c
+mm/mmap.c
+mm/mprotect.c
+mm/mremap.c
+mm/mseal.c
diff --git a/pc/nilfs2-convert-to-use-the-new-mount-api.pc b/pc/nilfs2-convert-to-use-the-new-mount-api.pc
new file mode 100644
index 000000000..845e88d86
--- /dev/null
+++ b/pc/nilfs2-convert-to-use-the-new-mount-api.pc
@@ -0,0 +1,4 @@
+fs/nilfs2/nilfs.h
+fs/nilfs2/super.c
+fs/nilfs2/the_nilfs.c
+fs/nilfs2/the_nilfs.h
diff --git a/pc/ocfs2-remove-redundant-assignment-to-variable-status.pc b/pc/ocfs2-remove-redundant-assignment-to-variable-status.pc
new file mode 100644
index 000000000..6d5403cf8
--- /dev/null
+++ b/pc/ocfs2-remove-redundant-assignment-to-variable-status.pc
@@ -0,0 +1 @@
+fs/ocfs2/dlm/dlmdomain.c
diff --git a/pc/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.pc b/pc/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.pc
new file mode 100644
index 000000000..803c452f8
--- /dev/null
+++ b/pc/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.pc
@@ -0,0 +1 @@
+tools/testing/selftests/mm/soft-dirty.c
diff --git a/pc/tools-fix-userspace-compilation-with-new-test_xarray-changes.pc b/pc/tools-fix-userspace-compilation-with-new-test_xarray-changes.pc
new file mode 100644
index 000000000..f27cee2ea
--- /dev/null
+++ b/pc/tools-fix-userspace-compilation-with-new-test_xarray-changes.pc
@@ -0,0 +1 @@
+tools/testing/radix-tree/linux/kernel.h
diff --git a/pc/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.pc b/pc/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.pc
new file mode 100644
index 000000000..7d3e8bccc
--- /dev/null
+++ b/pc/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.pc
@@ -0,0 +1,2 @@
+tools/include/linux/rbtree_augmented.h
+tools/lib/rbtree.c
diff --git a/pc/userfault-expand-folio-use-in-mfill_atomic_install_pte.pc b/pc/userfault-expand-folio-use-in-mfill_atomic_install_pte.pc
new file mode 100644
index 000000000..d6f75b4ac
--- /dev/null
+++ b/pc/userfault-expand-folio-use-in-mfill_atomic_install_pte.pc
@@ -0,0 +1 @@
+mm/userfaultfd.c
diff --git a/pc/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.pc b/pc/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.pc
deleted file mode 100644
index 9fc77577f..000000000
--- a/pc/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.pc
+++ /dev/null
@@ -1 +0,0 @@
-tools/writeback/wb_monitor.py
diff --git a/pc/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.pc b/pc/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.pc
deleted file mode 100644
index 671f14319..000000000
--- a/pc/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/backing-dev.c
diff --git a/pc/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.pc b/pc/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.pc
deleted file mode 100644
index fff722845..000000000
--- a/pc/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/page-writeback.c
diff --git a/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.pc b/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.pc
deleted file mode 100644
index 671f14319..000000000
--- a/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/backing-dev.c
diff --git a/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.pc b/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.pc
deleted file mode 100644
index 671f14319..000000000
--- a/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.pc
+++ /dev/null
@@ -1 +0,0 @@
-mm/backing-dev.c
diff --git a/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.pc b/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.pc
deleted file mode 100644
index 0f50686a4..000000000
--- a/pc/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.pc
+++ /dev/null
@@ -1,3 +0,0 @@
-include/linux/writeback.h
-mm/backing-dev.c
-mm/page-writeback.c
diff --git a/txt/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.txt b/txt/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.txt
new file mode 100644
index 000000000..734af664a
--- /dev/null
+++ b/txt/f2fs-convert-f2fs_clear_page_cache_dirty_tag-to-use-a-folio.txt
@@ -0,0 +1,11 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: f2fs: convert f2fs_clear_page_cache_dirty_tag to use a folio
+Date: Tue, 23 Apr 2024 23:55:33 +0100
+
+Removes uses of page_mapping() and page_index().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-3-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
diff --git a/txt/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.txt b/txt/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.txt
new file mode 100644
index 000000000..a5978bf89
--- /dev/null
+++ b/txt/fscrypt-convert-bh_get_inode_and_lblk_num-to-use-a-folio.txt
@@ -0,0 +1,21 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: fscrypt: convert bh_get_inode_and_lblk_num to use a folio
+Date: Tue, 23 Apr 2024 23:55:32 +0100
+
+Patch series "Remove page_mapping()".
+
+There are only a few users left. Convert them all to either call
+folio_mapping() or just use folio->mapping directly.
+
+
+This patch (of 6):
+
+Remove uses of page->index, page_mapping() and b_page. Saves a call
+to compound_head().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-1-willy@infradead.org
+Link: https://lkml.kernel.org/r/20240423225552.4113447-2-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
diff --git a/txt/gup-use-folios-for-gup_devmap.txt b/txt/gup-use-folios-for-gup_devmap.txt
new file mode 100644
index 000000000..fe7ba0d8a
--- /dev/null
+++ b/txt/gup-use-folios-for-gup_devmap.txt
@@ -0,0 +1,11 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: gup: use folios for gup_devmap
+Date: Wed, 24 Apr 2024 20:19:10 +0100
+
+Use try_grab_folio() instead of try_grab_page() so we get the folio back
+that we calculated, and then use folio_set_referenced() instead of
+SetPageReferenced(). Correspondingly, use gup_put_folio() to put any
+unneeded references.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-6-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/kfifo-dont-use-proxy-headers.txt b/txt/kfifo-dont-use-proxy-headers.txt
new file mode 100644
index 000000000..bfa7f4638
--- /dev/null
+++ b/txt/kfifo-dont-use-proxy-headers.txt
@@ -0,0 +1,21 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Subject: kfifo: don't use "proxy" headers
+Date: Tue, 23 Apr 2024 22:23:10 +0300
+
+Update header inclusions to follow IWYU (Include What You Use) principle.
+
+Link: https://lkml.kernel.org/r/20240423192529.3249134-4-andriy.shevchenko@linux.intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Alain Volmat <alain.volmat@foss.st.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Chen-Yu Tsai <wens@csie.org>
+Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Jernej Skrabec <jernej.skrabec@gmail.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Patrice Chotard <patrice.chotard@foss.st.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Sean Young <sean@mess.org>
+Cc: Stefani Seibold <stefani@seibold.net>
diff --git a/txt/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.txt b/txt/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.txt
new file mode 100644
index 000000000..3ae196adf
--- /dev/null
+++ b/txt/lib-test_xarrayc-fix-error-assumptions-on-check_xa_multi_store_adv_add.txt
@@ -0,0 +1,44 @@
+From: Luis Chamberlain <mcgrof@kernel.org>
+Subject: lib/test_xarray.c: fix error assumptions on check_xa_multi_store_adv_add()
+Date: Tue, 23 Apr 2024 12:22:21 -0700
+
+While testing lib/test_xarray in userspace I've noticed we can fail with:
+
+make -C tools/testing/radix-tree
+./tools/testing/radix-tree/xarray
+
+BUG at check_xa_multi_store_adv_add:749
+xarray: 0x55905fb21a00x head 0x55905fa1d8e0x flags 0 marks 0 0 0
+0: 0x55905fa1d8e0x
+xarray: ../../../lib/test_xarray.c:749: check_xa_multi_store_adv_add: Assertion `0' failed.
+Aborted
+
+We get a failure with a BUG_ON(), and that is because we actually can
+fail due to -ENOMEM, the check in xas_nomem() will fix this for us so
+it makes no sense to expect no failure inside the loop. So modify the
+check and since this is also useful for instructional purposes clarify
+the situation.
+
+The check for XA_BUG_ON(xa, xa_load(xa, index) != p) is already done
+at the end of the loop so just remove the bogus one inside the loop.
+
+With this we now pass the test in both kernel and userspace:
+
+In userspace:
+
+./tools/testing/radix-tree/xarray
+XArray: 149092856 of 149092856 tests passed
+
+In kernel space:
+
+XArray: 148257077 of 148257077 tests passed
+
+Link: https://lkml.kernel.org/r/20240423192221.301095-3-mcgrof@kernel.org
+Fixes: a60cc288a1a2 ("test_xarray: add tests for advanced multi-index use")
+Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
+Cc: Daniel Gomez <da.gomez@samsung.com>
+Cc: Darrick J. Wong <djwong@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Pankaj Raghav <p.raghav@samsung.com>
diff --git a/txt/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.txt b/txt/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.txt
new file mode 100644
index 000000000..bf38cee9b
--- /dev/null
+++ b/txt/maintainers-update-urls-for-keys-keyrings_integrity-and-tpm-device-driver.txt
@@ -0,0 +1,16 @@
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Subject: MAINTAINERS: update URL's for KEYS/KEYRINGS_INTEGRITY and TPM DEVICE DRIVER
+Date: Wed, 24 Apr 2024 00:45:49 +0300
+
+Add TPM driver test suite URL to the MAINTAINERS file and move the wiki
+URL to a more appropriate location.
+
+Link: https://gitlab.com/jarkkojs/linux-tpmdd-test
+Link: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
+Link: https://lkml.kernel.org/r/20240423214549.8242-1-jarkko@kernel.org
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Acked-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Mimi Zohar <zohar@linux.ibm.com>
+Cc: Peter Huewe <peterhuewe@gmx.de>
+Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
diff --git a/txt/media-rc-add-missing-ioh.txt b/txt/media-rc-add-missing-ioh.txt
new file mode 100644
index 000000000..e86c9958d
--- /dev/null
+++ b/txt/media-rc-add-missing-ioh.txt
@@ -0,0 +1,32 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Subject: media: rc: add missing io.h
+Date: Tue, 23 Apr 2024 22:23:08 +0300
+
+Patch series "kfifo: Clean up kfifo.h", v2.
+
+To reduce dependency hell to a degree, clean up kfifo.h (mainly getting
+rid of kernel.h in the global header).
+
+
+This patch (of 3):
+
+In many remote control drivers the io.h is implied by others. This is not
+good as it prevents cleanups from being done in other headers. Add the
+missing include.
+
+Link: https://lkml.kernel.org/r/20240423192529.3249134-1-andriy.shevchenko@linux.intel.com
+Link: https://lkml.kernel.org/r/20240423192529.3249134-2-andriy.shevchenko@linux.intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Alain Volmat <alain.volmat@foss.st.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Chen-Yu Tsai <wens@csie.org>
+Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Jernej Skrabec <jernej.skrabec@gmail.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Patrice Chotard <patrice.chotard@foss.st.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Sean Young <sean@mess.org>
+Cc: Stefani Seibold <stefani@seibold.net>
diff --git a/txt/media-stih-cec-add-missing-ioh.txt b/txt/media-stih-cec-add-missing-ioh.txt
new file mode 100644
index 000000000..fa9efd65f
--- /dev/null
+++ b/txt/media-stih-cec-add-missing-ioh.txt
@@ -0,0 +1,22 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Subject: media: stih-cec: add missing io.h
+Date: Tue, 23 Apr 2024 22:23:09 +0300
+
+In the driver the io.h is implied by others. This is not good as it
+prevents cleanups from being done in other headers. Add the missing include.
+
+Link: https://lkml.kernel.org/r/20240423192529.3249134-3-andriy.shevchenko@linux.intel.com
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Alain Volmat <alain.volmat@foss.st.com>
+Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+Cc: Chen-Yu Tsai <wens@csie.org>
+Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Jernej Skrabec <jernej.skrabec@gmail.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: Patrice Chotard <patrice.chotard@foss.st.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Sean Wang <sean.wang@mediatek.com>
+Cc: Sean Young <sean@mess.org>
+Cc: Stefani Seibold <stefani@seibold.net>
diff --git a/txt/memcg-fix-data-race-kcsan-bug-in-rstats.txt b/txt/memcg-fix-data-race-kcsan-bug-in-rstats.txt
new file mode 100644
index 000000000..ac58c3629
--- /dev/null
+++ b/txt/memcg-fix-data-race-kcsan-bug-in-rstats.txt
@@ -0,0 +1,47 @@
+From: Breno Leitao <leitao@debian.org>
+Subject: memcg: fix data-race KCSAN bug in rstats
+Date: Wed, 24 Apr 2024 05:59:39 -0700
+
+A data-race issue in memcg rstat occurs when two distinct code paths
+access the same 4-byte region concurrently. KCSAN detection triggers the
+following BUG as a result.
+
+ BUG: KCSAN: data-race in __count_memcg_events / mem_cgroup_css_rstat_flush
+
+ write to 0xffffe8ffff98e300 of 4 bytes by task 5274 on cpu 17:
+ mem_cgroup_css_rstat_flush (mm/memcontrol.c:5850)
+ cgroup_rstat_flush_locked (kernel/cgroup/rstat.c:243 (discriminator 7))
+ cgroup_rstat_flush (./include/linux/spinlock.h:401 kernel/cgroup/rstat.c:278)
+ mem_cgroup_flush_stats.part.0 (mm/memcontrol.c:767)
+ memory_numa_stat_show (mm/memcontrol.c:6911)
+<snip>
+
+ read to 0xffffe8ffff98e300 of 4 bytes by task 410848 on cpu 27:
+ __count_memcg_events (mm/memcontrol.c:725 mm/memcontrol.c:962)
+ count_memcg_event_mm.part.0 (./include/linux/memcontrol.h:1097 ./include/linux/memcontrol.h:1120)
+ handle_mm_fault (mm/memory.c:5483 mm/memory.c:5622)
+<snip>
+
+ value changed: 0x00000029 -> 0x00000000
+
+The race occurs because two code paths access the same "stats_updates"
+location. Although "stats_updates" is a per-CPU variable, it is remotely
+accessed by another CPU at
+cgroup_rstat_flush_locked()->mem_cgroup_css_rstat_flush(), leading to the
+data race mentioned.
+
+Considering that memcg_rstat_updated() is in the hot code path, adding a
+lock to protect it may not be desirable, especially since this variable
+pertains solely to statistics.
+
+Therefore, annotating accesses to stats_updates with READ/WRITE_ONCE() can
+prevent KCSAN splats and potential partial reads/writes.
+
+Link: https://lkml.kernel.org/r/20240424125940.2410718-1-leitao@debian.org
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Suggested-by: Shakeel Butt <shakeel.butt@linux.dev>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Muchun Song <songmuchun@bytedance.com>
diff --git a/txt/memory-failure-remove-calls-to-page_mapping.txt b/txt/memory-failure-remove-calls-to-page_mapping.txt
new file mode 100644
index 000000000..de1fa4e58
--- /dev/null
+++ b/txt/memory-failure-remove-calls-to-page_mapping.txt
@@ -0,0 +1,11 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: memory-failure: remove calls to page_mapping()
+Date: Tue, 23 Apr 2024 23:55:34 +0100
+
+This is mostly just inlining page_mapping() into the two callers.
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-4-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
diff --git a/txt/migrate-expand-the-use-of-folio-in-__migrate_device_pages.txt b/txt/migrate-expand-the-use-of-folio-in-__migrate_device_pages.txt
new file mode 100644
index 000000000..321982f92
--- /dev/null
+++ b/txt/migrate-expand-the-use-of-folio-in-__migrate_device_pages.txt
@@ -0,0 +1,11 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: migrate: expand the use of folio in __migrate_device_pages()
+Date: Tue, 23 Apr 2024 23:55:35 +0100
+
+Removes a few calls to compound_head() and a call to page_mapping().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-5-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
diff --git a/txt/mm-add-kernel-doc-for-folio_mark_accessed.txt b/txt/mm-add-kernel-doc-for-folio_mark_accessed.txt
new file mode 100644
index 000000000..26838d138
--- /dev/null
+++ b/txt/mm-add-kernel-doc-for-folio_mark_accessed.txt
@@ -0,0 +1,9 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: add kernel-doc for folio_mark_accessed()
+Date: Wed, 24 Apr 2024 20:19:11 +0100
+
+Convert the existing documentation to kernel-doc and remove references to
+pages.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-7-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.txt b/txt/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.txt
new file mode 100644
index 000000000..e7e57ae5d
--- /dev/null
+++ b/txt/mm-allow-for-detecting-underflows-with-page_mapcount-again-fix.txt
@@ -0,0 +1,26 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm-allow-for-detecting-underflows-with-page_mapcount-again-fix
+Date: Wed, 24 Apr 2024 10:50:09 +0200
+
+Let's make page_mapcount() slighly more efficient by inlining the
+page_type_has_type() check.
+
+Link: https://lkml.kernel.org/r/1af4fd61-7926-47c8-be45-833c0dbec08b@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Chris Zankel <chris@zankel.net>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Richard Chang <richardycc@google.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Zi Yan <ziy@nvidia.com>
diff --git a/txt/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.txt b/txt/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.txt
new file mode 100644
index 000000000..5afedb817
--- /dev/null
+++ b/txt/mm-convert-put_devmap_managed_page_refs-to-put_devmap_managed_folio_refs.txt
@@ -0,0 +1,9 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: convert put_devmap_managed_page_refs() to put_devmap_managed_folio_refs()
+Date: Wed, 24 Apr 2024 20:19:08 +0100
+
+All callers have a folio so we can remove this use of
+page_ref_sub_return().
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-4-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-free-non-hugetlb-large-folios-in-a-batch-fix.txt b/txt/mm-free-non-hugetlb-large-folios-in-a-batch-fix.txt
new file mode 100644
index 000000000..e4704d84b
--- /dev/null
+++ b/txt/mm-free-non-hugetlb-large-folios-in-a-batch-fix.txt
@@ -0,0 +1,77 @@
+From: Peter Xu <peterx@redhat.com>
+Subject: mm-free-non-hugetlb-large-folios-in-a-batch-fix
+Date: Wed, 24 Apr 2024 11:20:28 -0400
+
+On Fri, Apr 05, 2024 at 04:32:23PM +0100, Matthew Wilcox (Oracle) wrote:
+> free_unref_folios() can now handle non-hugetlb large folios, so keep
+> normal large folios in the batch. hugetlb folios still need to be
+> handled specially. I believe that folios freed using put_pages_list()
+> cannot be accounted to a memcg (or the small folios would trip the "page
+> still charged to cgroup" warning), but put an assertion in to check that.
+
+There's such user, iommu uses put_pages_list() to free IOMMU pgtables, and
+they can be memcg accounted; since 2023 iommu_map switched to use
+GFP_KERNEL_ACCOUNT.
+
+I hit below panic when testing my local branch over mm-everything when
+running some VFIO workloads.
+
+For this specific vfio use case, see 160912fc3d4a ("vfio/type1: account
+iommu allocations").
+
+I think we should remove the VM_BUG_ON_FOLIO() line, as the memcg will then
+be properly taken care of later in free_pages_prepare(). Fixup attached at
+the end that will fix this crash for me.
+
+[ 10.092411] kernel BUG at mm/swap.c:152!
+[ 10.092686] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+[ 10.093034] CPU: 3 PID: 634 Comm: vfio-pci-mmap-t Tainted: G W 6.9.0-rc4-peterx+ #2
+[ 10.093628] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+[ 10.094361] RIP: 0010:put_pages_list+0x12b/0x150
+[ 10.094675] Code: 6d 08 48 81 c4 00 01 00 00 5b 5d c3 cc cc cc cc 48 c7 c6 f0 fd 9f 82 e8 63 e8 03 00 0f 0b 48 c7 c6 48 00 a0 82 e8 55 e8 03 00 <0f> 0b 48 c7 c6 28 fe 9f 82 e8 47f
+[ 10.095896] RSP: 0018:ffffc9000221bc50 EFLAGS: 00010282
+[ 10.096242] RAX: 0000000000000038 RBX: ffffea00042695c0 RCX: 0000000000000000
+[ 10.096707] RDX: 0000000000000001 RSI: 0000000000000027 RDI: 00000000ffffffff
+[ 10.097177] RBP: ffffc9000221bd68 R08: 0000000000000000 R09: 0000000000000003
+[ 10.097642] R10: ffffc9000221bb08 R11: ffffffff8335db48 R12: ffff8881070172c0
+[ 10.098113] R13: ffff888102fd0000 R14: ffff888107017210 R15: ffff888110a6c7c0
+[ 10.098586] FS: 0000000000000000(0000) GS:ffff888276a00000(0000) knlGS:0000000000000000
+[ 10.099117] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 10.099494] CR2: 00007f1910000000 CR3: 000000000323c006 CR4: 0000000000770ef0
+[ 10.099972] PKRU: 55555554
+[ 10.100154] Call Trace:
+[ 10.100321] <TASK>
+[ 10.100466] ? die+0x32/0x80
+[ 10.100666] ? do_trap+0xd9/0x100
+[ 10.100897] ? put_pages_list+0x12b/0x150
+[ 10.101168] ? put_pages_list+0x12b/0x150
+[ 10.101434] ? do_error_trap+0x81/0x110
+[ 10.101688] ? put_pages_list+0x12b/0x150
+[ 10.101957] ? exc_invalid_op+0x4c/0x60
+[ 10.102216] ? put_pages_list+0x12b/0x150
+[ 10.102484] ? asm_exc_invalid_op+0x16/0x20
+[ 10.102771] ? put_pages_list+0x12b/0x150
+[ 10.103026] ? 0xffffffff81000000
+[ 10.103246] ? dma_pte_list_pagetables.isra.0+0x38/0xa0
+[ 10.103592] ? dma_pte_list_pagetables.isra.0+0x9b/0xa0
+[ 10.103933] ? dma_pte_clear_level+0x18c/0x1a0
+[ 10.104228] ? domain_unmap+0x65/0x130
+[ 10.104481] ? domain_unmap+0xe6/0x130
+[ 10.104735] domain_exit+0x47/0x80
+[ 10.104968] vfio_iommu_type1_detach_group+0x3f1/0x5f0
+[ 10.105308] ? vfio_group_detach_container+0x3c/0x1a0
+[ 10.105644] vfio_group_detach_container+0x60/0x1a0
+[ 10.105977] vfio_group_fops_release+0x46/0x80
+[ 10.106274] __fput+0x9a/0x2d0
+[ 10.106479] task_work_run+0x55/0x90
+[ 10.106717] do_exit+0x32f/0xb70
+[ 10.106945] ? _raw_spin_unlock_irq+0x24/0x50
+[ 10.107237] do_group_exit+0x32/0xa0
+[ 10.107481] __x64_sys_exit_group+0x14/0x20
+[ 10.107760] do_syscall_64+0x75/0x190
+[ 10.108007] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Link: https://lkml.kernel.org/r/ZikjPB0Dt5HA8-uL@x1n
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Cc: Alex Williamson <alex.williamson@redhat.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-hugetlb-fix-debug_locks_warn_on1-when-dissolve_free_hugetlb_folio.txt b/txt/mm-hugetlb-fix-debug_locks_warn_on1-when-dissolve_free_hugetlb_folio.txt
index e6929e265..6f7dd5f77 100644
--- a/txt/mm-hugetlb-fix-debug_locks_warn_on1-when-dissolve_free_hugetlb_folio.txt
+++ b/txt/mm-hugetlb-fix-debug_locks_warn_on1-when-dissolve_free_hugetlb_folio.txt
@@ -112,5 +112,5 @@ checking clear_flag to close the race window.
Link: https://lkml.kernel.org/r/20240419085819.1901645-1-linmiaohe@huawei.com
Fixes: 32c877191e02 ("hugetlb: do not clear hugetlb dtor until allocating vmemmap")
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
-Cc: Oscar Salvador <osalvador@suse.de>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
diff --git a/txt/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.txt b/txt/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.txt
new file mode 100644
index 000000000..f8e5c120f
--- /dev/null
+++ b/txt/mm-khugepaged-replace-page_mapcount-check-by-folio_likely_mapped_shared.txt
@@ -0,0 +1,144 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm/khugepaged: replace page_mapcount() check by folio_likely_mapped_shared()
+Date: Wed, 24 Apr 2024 14:26:30 +0200
+
+We want to limit the use of page_mapcount() to places where absolutely
+required, to prepare for kernel configs where we won't keep track of
+per-page mapcounts in large folios.
+
+khugepaged is one of the remaining "more challenging" page_mapcount()
+users, but we might be able to move away from page_mapcount() without
+resulting in a significant behavior change that would warrant
+special-casing based on kernel configs.
+
+In 2020, we first added support to khugepaged for collapsing COW-shared
+pages via commit 9445689f3b61 ("khugepaged: allow to collapse a page
+shared across fork"), followed by support for collapsing PTE-mapped THP in
+commit 5503fbf2b0b8 ("khugepaged: allow to collapse PTE-mapped compound
+pages") and limiting the memory waste via the "page_count() > 1" check in
+commit 71a2c112a0f6 ("khugepaged: introduce 'max_ptes_shared' tunable").
+
+As a default, khugepaged will allow up to half of the PTEs to map shared
+pages: where page_mapcount() > 1. MADV_COLLAPSE ignores the khugepaged
+setting.
+
+khugepaged does currently not care about swapcache page references, and
+does not check under folio lock: so in some corner cases the "shared vs.
+exclusive" detection might be a bit off, making us detect "exclusive" when
+it's actually "shared".
+
+Most of our anonymous folios in the system are usually exclusive. We
+frequently see sharing of anonymous folios for a short period of time,
+after which our short-lived subprocesses either quit or exec().
+
+There are some famous examples, though, where child processes exist for a
+long time, and where memory is COW-shared with a lot of processes
+(webservers, webbrowsers, sshd, ...) and COW-sharing is crucial for
+reducing the memory footprint. We don't want to suddenly change the
+behavior to result in a significant increase in memory waste.
+
+Interestingly, khugepaged will only collapse an anonymous THP if at least
+one PTE is writable. After fork(), that means that something (usually a
+page fault) populated at least a single exclusive anonymous THP in that
+PMD range.
+
+So ... what happens when we switch to "is this folio mapped shared"
+instead of "is this page mapped shared" by using
+folio_likely_mapped_shared()?
+
+For "not-COW-shared" folios, small folios and for THPs (large folios) that
+are completely mapped into at least one process, switching to
+folio_likely_mapped_shared() will not result in a change.
+
+We'll only see a change for COW-shared PTE-mapped THPs that are partially
+mapped into all involved processes.
+
+There are two cases to consider:
+
+(A) folio_likely_mapped_shared() returns "false" for a PTE-mapped THP
+
+ If the folio is detected as exclusive, and it actually is exclusive,
+ there is no change: page_mapcount() == 1. This is the common case
+ without fork() or with short-lived child processes.
+
+ folio_likely_mapped_shared() might currently still detect a folio as
+ exclusive although it is shared (false negatives): if the first page is
+ not mapped multiple times and if the average per-page mapcount is smaller
+ than 1, implying that (1) the folio is partially mapped and (2) if we are
+ responsible for many mapcounts by mapping many pages others can't
+ ("mostly exclusive") (3) if we are not responsible for many mapcounts by
+ mapping little pages ("mostly shared") it won't make a big impact on the
+ end result.
+
+ So while we might now detect a page as "exclusive" although it isn't,
+ it's not expected to make a big difference in common cases.
+
+(B) folio_likely_mapped_shared() returns "true" for a PTE-mapped THP
+
+ folio_likely_mapped_shared() will never detect a large anonymous folio
+ as shared although it is exclusive: there are no false positives.
+
+ If we detect a THP as shared, at least one page of the THP is mapped by
+ another process. It could well be that some pages are actually exclusive.
+ For example, our child processes could have unmapped/COW'ed some pages
+ such that they would now be exclusive to our process, which we now
+ would treat as still-shared.
+
+ Examples:
+ (1) Parent maps all pages of a THP, child maps some pages. We detect
+ all pages in the parent as shared although some are actually
+ exclusive.
+ (2) Parent maps all but some page of a THP, child maps the remainder.
+ We detect all pages of the THP that the parent maps as shared
+ although they are all exclusive.
+
+ In (1) we wouldn't collapse a THP right now already: no PTE
+ is writable, because a write fault would have resulted in COW of a
+ single page and the parent would no longer map all pages of that THP.
+
+ For (2) we would have collapsed a THP in the parent so far, now we
+ wouldn't as long as the child process is still alive: unless the child
+ process unmaps the remaining THP pages or we decide to split that THP.
+
+ Possibly, the child COW'ed many pages, meaning that it's likely that
+ we can populate a THP for our child first, and then for our parent.
+
+ For (2), we are making really bad use of the THP in the first
+ place (not even mapped completely in at least one process). If the
+ THP would be completely partially mapped, it would be on the deferred
+ split queue where we would split it lazily later.
+
+ For short-running child processes, we don't particularly care. For
+ long-running processes, the expectation is that such scenarios are
+ rather rare: further, a THP might be best placed if most data in the
+ PMD range is actually written, implying that we'll have to COW more
+ pages first before khugepaged would collapse it.
+
+To summarize, in the common case, this change is not expected to matter
+much. The more common application of khugepaged operates on exclusive
+pages, either before fork() or after a child quit.
+
+Can we improve (A)? Yes, if we implement more precise tracking of "mapped
+shared" vs. "mapped exclusively", we could get rid of the false negatives
+completely.
+
+Can we improve (B)? We could count how many pages of a large folio we map
+inside the current page table and detect that we are responsible for most
+of the folio mapcount and conclude "as good as exclusive", which might
+help in some cases. ... but likely, some other mechanism should detect
+that the THP is not a good use in the scenario (not even mapped completely
+in a single process) and try splitting that folio lazily etc.
+
+We'll move the folio_test_anon() check before our "shared" check, so we
+might get more expressive results for SCAN_EXCEED_SHARED_PTE: this order
+of checks now matches the one in __collapse_huge_page_isolate(). Extend
+documentation.
+
+Link: https://lkml.kernel.org/r/20240424122630.495788-1-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: Yang Shi <yang.shi@linux.alibaba.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
diff --git a/txt/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.txt b/txt/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.txt
new file mode 100644
index 000000000..fb1abd711
--- /dev/null
+++ b/txt/mm-make-folio_mapcount-return-0-for-small-typed-folios-fix.txt
@@ -0,0 +1,26 @@
+From: David Hildenbrand <david@redhat.com>
+Subject: mm-make-folio_mapcount-return-0-for-small-typed-folios-fix
+Date: Wed, 24 Apr 2024 10:56:17 +0200
+
+Just like page_mapcount(), let's make folio_mapcount() slightly more
+efficient.
+
+Link: https://lkml.kernel.org/r/c30fcda1-ed87-46f5-8297-cdedbddac009@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Cc: Chris Zankel <chris@zankel.net>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Richard Chang <richardycc@google.com>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Zi Yan <ziy@nvidia.com>
diff --git a/txt/mm-page_owner-fixing-wrong-information-in-dump_page_owner.txt b/txt/mm-page_owner-fixing-wrong-information-in-dump_page_owner.txt
new file mode 100644
index 000000000..77c3d597a
--- /dev/null
+++ b/txt/mm-page_owner-fixing-wrong-information-in-dump_page_owner.txt
@@ -0,0 +1,25 @@
+From: Maninder Singh <maninder1.s@samsung.com>
+Subject: mm: page_owner: fix wrong information in dump_page_owner
+Date: Wed, 24 Apr 2024 16:48:37 +0530
+
+With commit ea4b5b33bf8a ("mm,page_owner: update metadata for tail
+pages"), new API __update_page_owner_handle was introduced and arguments
+were passed in the wrong order from __set_page_owner and thus page_owner
+is giving wrong data.
+
+[ 15.982420] page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL), pid 80, tgid -1210279584 (insmod), ts 80, free_ts 0
+
+Fixing the same.
+Correct output:
+[ 14.556482] page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL), pid 80, tgid 80 (insmod), ts 14552004992, free_ts 0
+
+Link: https://lkml.kernel.org/r/20240424111838.3782931-1-hariom1.p@samsung.com
+Fixes: ea4b5b33bf8a ("mm,page_owner: update metadata for tail pages")
+Signed-off-by: Maninder Singh <maninder1.s@samsung.com>
+Signed-off-by: Hariom Panthi <hariom1.p@samsung.com>
+Acked-by: Oscar Salvador <osalvador@suse.de>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Rohit Thapliyal <r.thapliyal@samsung.com>
+Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
diff --git a/txt/mm-remove-page_cache_alloc.txt b/txt/mm-remove-page_cache_alloc.txt
new file mode 100644
index 000000000..6e52a1960
--- /dev/null
+++ b/txt/mm-remove-page_cache_alloc.txt
@@ -0,0 +1,16 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove page_cache_alloc()
+Date: Wed, 24 Apr 2024 20:19:06 +0100
+
+Patch series "More folio compat code removal".
+
+More code removal with bonus kernel-doc addition.
+
+
+This patch (of 7):
+
+All callers have now been converted to filemap_alloc_folio().
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-1-willy@infradead.org
+Link: https://lkml.kernel.org/r/20240424191914.361554-2-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-remove-page_mapping.txt b/txt/mm-remove-page_mapping.txt
new file mode 100644
index 000000000..e0bb2433f
--- /dev/null
+++ b/txt/mm-remove-page_mapping.txt
@@ -0,0 +1,11 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove page_mapping()
+Date: Tue, 23 Apr 2024 23:55:37 +0100
+
+All callers are now converted, delete this compatibility wrapper.
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-7-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
diff --git a/txt/mm-remove-page_ref_sub_return.txt b/txt/mm-remove-page_ref_sub_return.txt
new file mode 100644
index 000000000..f6fac02ae
--- /dev/null
+++ b/txt/mm-remove-page_ref_sub_return.txt
@@ -0,0 +1,9 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove page_ref_sub_return()
+Date: Wed, 24 Apr 2024 20:19:09 +0100
+
+With all callers converted to folios, we can act directly on
+folio->_refcount.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-5-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-remove-pagereferenced.txt b/txt/mm-remove-pagereferenced.txt
new file mode 100644
index 000000000..9442877a0
--- /dev/null
+++ b/txt/mm-remove-pagereferenced.txt
@@ -0,0 +1,9 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove PageReferenced
+Date: Wed, 24 Apr 2024 20:19:12 +0100
+
+All callers now use folio_*_referenced() so we can remove the
+PageReferenced family of functions.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-8-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-remove-put_devmap_managed_page.txt b/txt/mm-remove-put_devmap_managed_page.txt
new file mode 100644
index 000000000..0c61c3dce
--- /dev/null
+++ b/txt/mm-remove-put_devmap_managed_page.txt
@@ -0,0 +1,9 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: mm: remove put_devmap_managed_page()
+Date: Wed, 24 Apr 2024 20:19:07 +0100
+
+It only has one caller; convert that caller to use
+put_devmap_managed_page_refs() instead.
+
+Link: https://lkml.kernel.org/r/20240424191914.361554-3-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
diff --git a/txt/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.txt b/txt/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.txt
new file mode 100644
index 000000000..d7427f361
--- /dev/null
+++ b/txt/mm-vmalloc-dump-page-owner-info-if-page-is-already-mapped.txt
@@ -0,0 +1,45 @@
+From: Hariom Panthi <hariom1.p@samsung.com>
+Subject: mm: vmalloc: dump page owner info if page is already mapped
+Date: Wed, 24 Apr 2024 16:48:38 +0530
+
+In vmap_pte_range(), BUG_ON is called when a page is already mapped,
+but it doesn't give enough information to debug further.
+Dumping page owner information along with the BUG_ON will be more useful
+in case of multiple page mappings.
+
+Example:
+[ 14.552875] page: refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x10b923
+[ 14.553440] flags: 0xbffff0000000000(node=0|zone=2|lastcpupid=0x3ffff)
+[ 14.554001] page_type: 0xffffffff()
+[ 14.554783] raw: 0bffff0000000000 0000000000000000 dead000000000122 0000000000000000
+[ 14.555230] raw: 0000000000000000 0000000000000000 00000001ffffffff 0000000000000000
+[ 14.555768] page dumped because: remapping already mapped page
+[ 14.556172] page_owner tracks the page as allocated
+[ 14.556482] page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL), pid 80, tgid 80 (insmod), ts 14552004992, free_ts 0
+[ 14.557286] prep_new_page+0xa8/0x10c
+[ 14.558052] get_page_from_freelist+0x7f8/0x1248
+[ 14.558298] __alloc_pages+0x164/0x2b4
+[ 14.558514] alloc_pages_mpol+0x88/0x230
+[ 14.558904] alloc_pages+0x4c/0x7c
+[ 14.559157] load_module+0x74/0x1af4
+[ 14.559361] __do_sys_init_module+0x190/0x1fc
+[ 14.559615] __arm64_sys_init_module+0x1c/0x28
+[ 14.559883] invoke_syscall+0x44/0x108
+[ 14.560109] el0_svc_common.constprop.0+0x40/0xe0
+[ 14.560371] do_el0_svc_compat+0x1c/0x34
+[ 14.560600] el0_svc_compat+0x2c/0x80
+[ 14.560820] el0t_32_sync_handler+0x90/0x140
+[ 14.561040] el0t_32_sync+0x194/0x198
+[ 14.561329] page_owner free stack trace missing
+[ 14.562049] ------------[ cut here ]------------
+[ 14.562314] kernel BUG at mm/vmalloc.c:113!
+
+Link: https://lkml.kernel.org/r/20240424111838.3782931-2-hariom1.p@samsung.com
+Signed-off-by: Hariom Panthi <hariom1.p@samsung.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Lorenzo Stoakes <lstoakes@gmail.com>
+Cc: Maninder Singh <maninder1.s@samsung.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Rohit Thapliyal <r.thapliyal@samsung.com>
+Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
diff --git a/txt/mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.txt b/txt/mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.txt
index ebcea39b4..66667fbf6 100644
--- a/txt/mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.txt
+++ b/txt/mm-vmscan-avoid-split-pmd-mapped-thp-during-shrink_folio_list.txt
@@ -7,10 +7,11 @@ madvise(MADV_FREE) to mark the pages as lazy free. IMO, they would not
typically rewrite to the given range.
At present, PMD-mapped THPs that are marked as lazyfree during
-shrink_folio_list() are unconditionally split, which may be unnecessary.
-If the THP is clean, its PMD is also clean, and there are no unexpected
-references, then we can attempt to remove the PMD mapping from it. This
-change will improve the efficiency of memory reclamation in this case.
+shrink_folio_list() are unconditionally split, which may be
+unnecessary. If the THP and its PMD are both marked as clean, and
+there are no unexpected references, then we can attempt to remove the
+PMD mapping from it. This change will improve the efficiency of memory
+reclamation in this case.
On an Intel i5 CPU, reclaiming 1GiB of PMD-mapped THPs using
mem_cgroup_force_empty() results in the following runtimes in seconds
diff --git a/txt/mseal-add-mseal-syscall-fix.txt b/txt/mseal-add-mseal-syscall-fix.txt
new file mode 100644
index 000000000..b0c3149dc
--- /dev/null
+++ b/txt/mseal-add-mseal-syscall-fix.txt
@@ -0,0 +1,27 @@
+From: Jeff Xu <jeffxu@chromium.org>
+Subject: mseal: add branch prediction hint
+Date: Tue, 23 Apr 2024 19:28:25 +0000
+
+It is unlikely that an application calls mm syscalls, such as mprotect(),
+on already sealed mappings, so add a branch prediction hint.
+
+Link: https://lkml.kernel.org/r/20240423192825.1273679-2-jeffxu@chromium.org
+Signed-off-by: Jeff Xu <jeffxu@chromium.org>
+Suggested-by: Pedro Falcato <pedro.falcato@gmail.com>
+Cc: Amer Al Shanawany <amer.shanawany@gmail.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Guenter Roeck <groeck@chromium.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+Cc: Jeff Xu <jeffxu@google.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Jorge Lucangeli Obes <jorgelo@chromium.org>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Stephen Röttger <sroettger@google.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
diff --git a/txt/nilfs2-convert-to-use-the-new-mount-api.txt b/txt/nilfs2-convert-to-use-the-new-mount-api.txt
new file mode 100644
index 000000000..e7b1d367d
--- /dev/null
+++ b/txt/nilfs2-convert-to-use-the-new-mount-api.txt
@@ -0,0 +1,11 @@
+From: Eric Sandeen <sandeen@redhat.com>
+Subject: nilfs2: convert to use the new mount API
+Date: Thu, 25 Apr 2024 03:27:16 +0900
+
+Convert nilfs2 to use the new mount API.
+
+[konishi.ryusuke: fixed missing SB_RDONLY flag repair in nilfs_reconfigure]
+Link: https://lkml.kernel.org/r/33d078a7-9072-4d8e-a3a9-dec23d4191da@redhat.com
+Link: https://lkml.kernel.org/r/20240424182716.6024-1-konishi.ryusuke@gmail.com
+Signed-off-by: Eric Sandeen <sandeen@redhat.com>
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
diff --git a/txt/ocfs2-remove-redundant-assignment-to-variable-status.txt b/txt/ocfs2-remove-redundant-assignment-to-variable-status.txt
new file mode 100644
index 000000000..e56dbe397
--- /dev/null
+++ b/txt/ocfs2-remove-redundant-assignment-to-variable-status.txt
@@ -0,0 +1,23 @@
+From: Colin Ian King <colin.i.king@gmail.com>
+Subject: ocfs2: remove redundant assignment to variable status
+Date: Tue, 23 Apr 2024 23:30:18 +0100
+
+Variable status is being assigned an error code that is never read; it is
+being assigned inside of a do-while loop.  The assignment is redundant and
+can be removed.
+
+Cleans up clang scan build warning:
+fs/ocfs2/dlm/dlmdomain.c:1530:2: warning: Value stored to 'status' is never
+read [deadcode.DeadStores]
+
+Link: https://lkml.kernel.org/r/20240423223018.1573213-1-colin.i.king@gmail.com
+Signed-off-by: Colin Ian King <colin.i.king@gmail.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Heming Zhao <heming.zhao@suse.com>
+Cc: Dan Carpenter <dan.carpenter@linaro.org>
diff --git a/txt/mm-update-shuffle-documentation-to-match-its-current-state.txt b/txt/old/mm-update-shuffle-documentation-to-match-its-current-state.txt
index 6051cc068..6051cc068 100644
--- a/txt/mm-update-shuffle-documentation-to-match-its-current-state.txt
+++ b/txt/old/mm-update-shuffle-documentation-to-match-its-current-state.txt
diff --git a/txt/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.txt b/txt/old/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.txt
index e82af78e1..e82af78e1 100644
--- a/txt/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.txt
+++ b/txt/old/writeback-add-wb_monitorpy-script-to-monitor-writeback-info-on-bdi.txt
diff --git a/txt/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.txt b/txt/old/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.txt
index 077293065..077293065 100644
--- a/txt/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.txt
+++ b/txt/old/writeback-collect-stats-of-all-wb-of-bdi-in-bdi_debug_stats_show.txt
diff --git a/txt/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.txt b/txt/old/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.txt
index 3c12e2b2e..3c12e2b2e 100644
--- a/txt/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.txt
+++ b/txt/old/writeback-rename-nr_reclaimable-to-nr_dirty-in-balance_dirty_pages.txt
diff --git a/txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.txt b/txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.txt
index a523a9146..a523a9146 100644
--- a/txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.txt
+++ b/txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix-2.txt
diff --git a/txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.txt b/txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.txt
index adc186995..adc186995 100644
--- a/txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.txt
+++ b/txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi-fix.txt
diff --git a/txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.txt b/txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.txt
index 3d4aeccda..3d4aeccda 100644
--- a/txt/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.txt
+++ b/txt/old/writeback-support-retrieving-per-group-debug-writeback-stats-of-bdi.txt
diff --git a/txt/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.txt b/txt/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.txt
new file mode 100644
index 000000000..9463bfee5
--- /dev/null
+++ b/txt/selftests-mm-soft-dirty-should-fail-if-a-testcase-fails.txt
@@ -0,0 +1,12 @@
+From: Ryan Roberts <ryan.roberts@arm.com>
+Subject: selftests/mm: soft-dirty should fail if a testcase fails
+Date: Wed, 24 Apr 2024 11:53:01 +0100
+
+Previously soft-dirty was unconditionally exiting with success, even if
+one of its testcases failed. Let's fix that so that failure can be
+reported to automated systems properly.
+
+Link: https://lkml.kernel.org/r/20240424105301.3157695-1-ryan.roberts@arm.com
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Reviewed-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/txt/tools-fix-userspace-compilation-with-new-test_xarray-changes.txt b/txt/tools-fix-userspace-compilation-with-new-test_xarray-changes.txt
new file mode 100644
index 000000000..abc553f27
--- /dev/null
+++ b/txt/tools-fix-userspace-compilation-with-new-test_xarray-changes.txt
@@ -0,0 +1,33 @@
+From: Luis Chamberlain <mcgrof@kernel.org>
+Subject: tools: fix userspace compilation with new test_xarray changes
+Date: Tue, 23 Apr 2024 12:22:20 -0700
+
+Patch series "test_xarray: couple of fixes for v6-9-rc6", v2.
+
+Here are a couple of fixes which should be merged into the queue for
+v6.9-rc6. The first one was reported by Liam, after fixing that I noticed
+an issue with a test, and a fix for that is in the second patch.
+
+
+This patch (of 2):
+
+Liam reported that compiling test_xarray in userspace was broken.  I
+was not even aware that was possible, but you can, and you can run these
+tests in userspace with:
+
+make -C tools/testing/radix-tree
+./tools/testing/radix-tree/xarray
+
+Add the two helpers we need to fix compilation. We don't need a userspace
+schedule() so just make it do nothing.
+
+Link: https://lkml.kernel.org/r/20240423192221.301095-1-mcgrof@kernel.org
+Link: https://lkml.kernel.org/r/20240423192221.301095-2-mcgrof@kernel.org
+Fixes: a60cc288a1a2 ("test_xarray: add tests for advanced multi-index use")
+Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
+Reported-by: "Liam R. Howlett" <Liam.Howlett@oracle.com>
+Cc: Daniel Gomez <da.gomez@samsung.com>
+Cc: Darrick J. Wong <djwong@kernel.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Pankaj Raghav <p.raghav@samsung.com>
diff --git a/txt/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.txt b/txt/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.txt
new file mode 100644
index 000000000..c8d66f452
--- /dev/null
+++ b/txt/tools-lib-rbtree-pick-some-improvements-from-the-kernel-rbtree-code.txt
@@ -0,0 +1,26 @@
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Subject: tools lib rbtree: Pick some improvements from the kernel rbtree code
+Date: Tue, 23 Apr 2024 17:27:57 -0300
+
+The tools/lib/rbtree.c code came from the kernel. Remove the
+EXPORT_SYMBOL() that make sense only there. Unfortunately it is not being
+checked with tools/perf/check_headers.sh. Will try to remedy this. Until
+then pick the improvements from:
+
+ b0687c1119b4e8c8 ("lib/rbtree: use '+' instead of '|' for setting color.")
+
+That I noticed by doing:
+
+ diff -u tools/lib/rbtree.c lib/rbtree.c
+ diff -u tools/include/linux/rbtree_augmented.h include/linux/rbtree_augmented.h
+
+There is one other case, but let's pick it up in separate patches.
+
+Link: https://lkml.kernel.org/r/ZigZzeFoukzRKG1Q@x1
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Noah Goldstein <goldstein.w.n@gmail.com>
diff --git a/txt/userfault-expand-folio-use-in-mfill_atomic_install_pte.txt b/txt/userfault-expand-folio-use-in-mfill_atomic_install_pte.txt
new file mode 100644
index 000000000..f15a5569c
--- /dev/null
+++ b/txt/userfault-expand-folio-use-in-mfill_atomic_install_pte.txt
@@ -0,0 +1,12 @@
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Subject: userfault: expand folio use in mfill_atomic_install_pte()
+Date: Tue, 23 Apr 2024 23:55:36 +0100
+
+Call page_folio() a little earlier so we can use folio_mapping()
+instead of page_mapping(), saving a call to compound_head().
+
+Link: https://lkml.kernel.org/r/20240423225552.4113447-6-willy@infradead.org
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Eric Biggers <ebiggers@google.com>
+Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>