author    Andrew Morton <akpm@linux-foundation.org>    2024-04-25 21:01:06 -0700
committer Andrew Morton <akpm@linux-foundation.org>    2024-04-25 21:01:06 -0700
commit    e006bf2f3181790a706531c01894b6f660d78a87
tree      80ef0d98317d3abcae6f7fe0c77573912c0e38bc
parent    2bed7f4dc8f0068285570fd817d54bc32d448b1a
foo
 patches/mm-gup-handle-huge-pmd-for-follow_pmd_mask.patch | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/patches/mm-gup-handle-huge-pmd-for-follow_pmd_mask.patch b/patches/mm-gup-handle-huge-pmd-for-follow_pmd_mask.patch
index fa63624b6..46248c689 100644
--- a/patches/mm-gup-handle-huge-pmd-for-follow_pmd_mask.patch
+++ b/patches/mm-gup-handle-huge-pmd-for-follow_pmd_mask.patch
@@ -16,6 +16,8 @@ it when the page is valid. It was not a bug to set it before even if GUP
failed (page==NULL), because follow_page_mask() callers always ignore
page_mask in that case. But doing so makes the code cleaner.
+[peterx@redhat.com: allow follow_pmd_mask() to take hugetlb tail pages]
+ Link: https://lkml.kernel.org/r/20240403013249.1418299-3-peterx@redhat.com
Link: https://lkml.kernel.org/r/20240327152332.950956-12-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
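
The contract described above can be made concrete with a minimal userspace C
sketch. This is an illustration under simplified, assumed types: lookup_page()
is a stand-in for follow_page_mask(), not a kernel function. Callers read
ctx->page_mask only after a successful lookup, so leaving it unset on failure
is never observable.

#include <stdio.h>

struct follow_page_context {
	unsigned int page_mask;	/* (base pages in the mapping) - 1 */
};

struct page;			/* opaque in this sketch */

/* stand-in for follow_page_mask(); NULL means the lookup failed */
static struct page *lookup_page(unsigned long addr,
				struct follow_page_context *ctx)
{
	(void)addr;
	(void)ctx;		/* failure path: page_mask left untouched */
	return NULL;
}

int main(void)
{
	struct follow_page_context ctx = { .page_mask = 0 };

	if (!lookup_page(0x1000, &ctx)) {
		/* page == NULL: page_mask is never consulted */
		puts("no page, page_mask ignored");
		return 0;
	}
	/* only a successful lookup makes page_mask meaningful */
	printf("mapping spans %u base pages\n", ctx.page_mask + 1);
	return 0;
}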
@@ -41,14 +43,14 @@ Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
- mm/gup.c | 107 ++++++++++++++++++++++++++++++++++++++++++---
- mm/huge_memory.c | 86 ------------------------------------
+ mm/gup.c | 104 ++++++++++++++++++++++++++++++++++++++++++---
+ mm/huge_memory.c | 86 -------------------------------------
mm/internal.h | 5 --
- 3 files changed, 105 insertions(+), 93 deletions(-)
+ 3 files changed, 102 insertions(+), 93 deletions(-)
--- a/mm/gup.c~mm-gup-handle-huge-pmd-for-follow_pmd_mask
+++ a/mm/gup.c
-@@ -580,6 +580,93 @@ static struct page *follow_huge_pud(stru
+@@ -580,6 +580,90 @@ static struct page *follow_huge_pud(stru
return page;
}
@@ -104,8 +106,6 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+ assert_spin_locked(pmd_lockptr(mm, pmd));
+
+ page = pmd_page(pmdval);
-+ VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
-+
+ if ((flags & FOLL_WRITE) &&
+ !can_follow_write_pmd(pmdval, page, vma, flags))
+ return NULL;
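
The PageHead() assertion goes away because, per the folded fixup noted above,
follow_pmd_mask() may now legitimately be handed hugetlb tail pages: on some
architectures a hugetlb page larger than a PMD is mapped by several PMD
entries, so pmd_page() of a non-first entry yields a tail page, which would
have tripped the check. A hedged userspace model of that case follows;
struct fake_page and page_is_head() are illustrative stand-ins, not kernel
API.

#include <stdbool.h>
#include <stdio.h>

/* toy model of a compound page: only element 0 is the head */
struct fake_page {
	bool head;
};

static bool page_is_head(const struct fake_page *p)
{
	return p->head;
}

int main(void)
{
	struct fake_page huge[512] = { [0] = { .head = true } };
	/* a later PMD entry of a multi-PMD hugetlb page points here */
	const struct fake_page *from_pmd = &huge[256];

	/* the removed VM_BUG_ON_PAGE() amounted to asserting this */
	printf("PageHead(from_pmd)? %s\n",
	       page_is_head(from_pmd) ? "yes" : "no");
	return 0;
}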
@@ -134,7 +134,6 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+
+ page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+ ctx->page_mask = HPAGE_PMD_NR - 1;
-+ VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
+
+ return page;
+}
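
The arithmetic that survives in this hunk picks out which base page inside the
PMD-mapped huge page corresponds to addr, and advertises the span of the
mapping through ctx->page_mask. Below is a runnable sketch with assumed
x86-64-style constants (4 KiB base pages, 2 MiB PMD leaves); the macro names
mirror the kernel's, but the values here are assumptions.

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB base pages (assumed) */
#define PMD_SHIFT	21	/* 2 MiB PMD leaves (assumed) */
#define HPAGE_PMD_MASK	(~((1UL << PMD_SHIFT) - 1))
#define HPAGE_PMD_NR	(1UL << (PMD_SHIFT - PAGE_SHIFT))	/* 512 */

int main(void)
{
	/* an address five base pages into a 2 MiB huge page */
	unsigned long addr = 0x40200000UL + 5 * (1UL << PAGE_SHIFT);

	/* same expression as the patch: index of the page to return */
	unsigned long tail_idx = (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;

	printf("page += %lu\n", tail_idx);		/* 5 */
	printf("page_mask = %lu\n", HPAGE_PMD_NR - 1UL);	/* 511 */
	return 0;
}

With addr five base pages past the huge page boundary, the lookup advances
page by 5 and reports page_mask 511 (HPAGE_PMD_NR - 1), matching the
ctx->page_mask assignment kept in the hunk above.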
@@ -142,7 +141,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static struct page *follow_huge_pud(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp,
-@@ -587,6 +674,14 @@ static struct page *follow_huge_pud(stru
+@@ -587,6 +671,14 @@ static struct page *follow_huge_pud(stru
{
return NULL;
}
@@ -157,7 +156,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
-@@ -784,31 +879,31 @@ static struct page *follow_pmd_mask(stru
+@@ -784,31 +876,31 @@ static struct page *follow_pmd_mask(stru
return page;
return no_page_table(vma, flags, address);
}