author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-01-16 19:18:09 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:18 -0800
commit	6bc56a4d855303705802c5ede4625973637484c7 (patch)
tree	0ddb237630acc16d09cf3f9bbcf2e0c122083151 /mm/memory.c
parent	c5792d9384113de4085dfbce6940e2a853debb67 (diff)
mm: add vma_alloc_zeroed_movable_folio()
Replace alloc_zeroed_user_highpage_movable().  The main difference is
returning a folio containing a single page instead of returning the page,
but take the opportunity to rename the function to match other allocation
functions a little better and rewrite the documentation to place more
emphasis on the zeroing rather than the highmem aspect.

Link: https://lkml.kernel.org/r/20230116191813.2145215-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
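The diffstat below is limited to mm/memory.c, so the new helper itself is
not shown here; the full patch adds its generic definition in
include/linux/highmem.h.  As a rough sketch of that helper's shape,
assuming the generic version simply wraps vma_alloc_folio() and
clear_user_highpage():

/*
 * Sketch only, not the diff below: roughly what the generic
 * vma_alloc_zeroed_movable_folio() looks like for architectures that
 * do not override it.  It allocates a single movable (high)page on
 * behalf of the VMA, zeroes it, and returns it as an order-0 folio.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
	if (folio)
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}

Architectures that zero pages differently (or tag memory at allocation
time) can presumably supply their own definition, just as they could
override alloc_zeroed_user_highpage_movable() before this rename.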
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	16
1 file changed, 10 insertions, 6 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 87b33b4967c23e..b6358ffbccaa84 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3056,10 +3056,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		goto oom;
 
 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		new_page = alloc_zeroed_user_highpage_movable(vma,
-							      vmf->address);
-		if (!new_page)
+		struct folio *new_folio;
+
+		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+		if (!new_folio)
 			goto oom;
+		new_page = &new_folio->page;
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 				vmf->address);
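The new_page = &new_folio->page line above is the transitional idiom of
the folio conversion: an order-0 folio and its single constituent page
are the same object viewed through different types, so callers still
built around struct page can bridge back at no cost.  A minimal sketch
of the round trip; folio_head_page() is a name invented here purely for
illustration:

#include <linux/mm.h>

/*
 * Illustration only (folio_head_page() is hypothetical): for an
 * order-0 folio, &folio->page is its single constituent page, and
 * page_folio() recovers the folio from that page again.
 */
static struct page *folio_head_page(struct folio *folio)
{
	struct page *page = &folio->page;

	VM_BUG_ON_FOLIO(page_folio(page) != folio, folio);
	return page;
}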
@@ -3995,6 +3997,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
+	struct folio *folio;
 	vm_fault_t ret = 0;
 	pte_t entry;
 
@@ -4044,11 +4047,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Allocate our own private page. */
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
-	if (!page)
+	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
+	if (!folio)
 		goto oom;
 
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	page = &folio->page;
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto oom_free_page;
 	cgroup_throttle_swaprate(page, GFP_KERNEL);