author     Ingo Molnar <mingo@kernel.org>  2021-11-26 12:03:49 +0100
committer  Ingo Molnar <mingo@kernel.org>  2022-03-15 12:57:29 +0100
commit     f98988b406e4afc023ef49119e13ec9ce3673d87 (patch)
tree       41526970e09fa39c29e424d2036c134c00d0bf41
parent     5020ff4bfa6aa7060baf3bef271f837af437ca93 (diff)
headers/deps: mm: Move rare & expensive APIs from <linux/mm_api.h> to <linux/mm_api_extra.h>
Move methods that rely on <linux/pgtable_api.h> over into a new header, <linux/mm_api_extra.h>.

This new header is only used ~78 times - while <linux/mm_api.h> is included in over 3,800 .c files.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  include/linux/mm_api.h        | 124
-rw-r--r--  include/linux/mm_api_extra.h  | 132
2 files changed, 133 insertions(+), 123 deletions(-)
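
For orientation, here is a minimal sketch (a hypothetical file, not part of this patch) of how a translation unit that needs one of the relocated helpers would pick up the new header directly, so the ~3,800 users of <linux/mm_api.h> no longer pay for the <linux/pgtable_api.h> dependency:

/* hypothetical-caller.c: illustrative sketch only, not part of this patch */
#include <linux/mm_api.h>		/* common mm APIs, included almost everywhere */
#include <linux/mm_api_extra.h>	/* rare helpers that drag in <linux/pgtable_api.h> */

static pte_t example_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	/* maybe_mkwrite() now lives in <linux/mm_api_extra.h> */
	return maybe_mkwrite(pte, vma);
}

Note that the patch below also makes <linux/mm_api.h> include the extra header at its end, so existing callers keep building without the explicit include for now.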
diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h
index af41d060629c2..99a5ecb782aab 100644
--- a/include/linux/mm_api.h
+++ b/include/linux/mm_api.h
@@ -604,27 +604,6 @@ static inline void set_compound_order(struct page *page, unsigned int order)
void free_compound_page(struct page *page);
-#ifdef CONFIG_MMU
-/*
- * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
- * servicing faults for write access. In the normal case, do always want
- * pte_mkwrite. But get_user_pages can cause write faults for mappings
- * that do not have writing enabled, when used by access_process_vm.
- */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
-{
- if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
- return pte;
-}
-
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
-
-vm_fault_t finish_fault(struct vm_fault *vmf);
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
-#endif
-
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
@@ -1176,20 +1155,6 @@ static inline unsigned long folio_pfn(struct folio *folio)
return page_to_pfn(&folio->page);
}
-/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
-#ifdef CONFIG_MIGRATION
-static inline bool is_pinnable_page(struct page *page)
-{
- return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
- is_zero_pfn(page_to_pfn(page));
-}
-#else
-static inline bool is_pinnable_page(struct page *page)
-{
- return true;
-}
-#endif
-
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
@@ -1671,61 +1636,6 @@ static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
-#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
- unsigned long address)
-{
- return 0;
-}
-static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
-static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
-
-#else
-int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
-
-static inline void mm_inc_nr_puds(struct mm_struct *mm)
-{
- if (mm_pud_folded(mm))
- return;
- atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
-}
-
-static inline void mm_dec_nr_puds(struct mm_struct *mm)
-{
- if (mm_pud_folded(mm))
- return;
- atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
-}
-#endif
-
-#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
- unsigned long address)
-{
- return 0;
-}
-
-static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
-static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
-
-#else
-int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
-
-static inline void mm_inc_nr_pmds(struct mm_struct *mm)
-{
- if (mm_pmd_folded(mm))
- return;
- atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
-}
-
-static inline void mm_dec_nr_pmds(struct mm_struct *mm)
-{
- if (mm_pmd_folded(mm))
- return;
- atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
-}
-#endif
-
#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
@@ -1761,29 +1671,6 @@ static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
-#if defined(CONFIG_MMU)
-
-static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
- unsigned long address)
-{
- return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
- NULL : p4d_offset(pgd, address);
-}
-
-static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
- unsigned long address)
-{
- return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
- NULL : pud_offset(p4d, address);
-}
-
-static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
-{
- return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
- NULL: pmd_offset(pud, address);
-}
-#endif /* CONFIG_MMU */
-
extern void __init pagecache_init(void);
extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);
@@ -2215,15 +2102,6 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
-#ifndef io_remap_pfn_range
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn,
- unsigned long size, pgprot_t prot)
-{
- return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
-}
-#endif
-
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
@@ -2694,4 +2572,6 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
#endif /* !__ASSEMBLY__ */
+#include <linux/mm_api_extra.h>
+
#endif /* _LINUX_MM_API_H */
diff --git a/include/linux/mm_api_extra.h b/include/linux/mm_api_extra.h
index a5ace2b198b89..34dbc3e20e789 100644
--- a/include/linux/mm_api_extra.h
+++ b/include/linux/mm_api_extra.h
@@ -1 +1,131 @@
-#include <linux/mm.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MM_API_EXTRA_H
+#define _LINUX_MM_API_EXTRA_H
+
+#include <linux/mm_api.h>
+
+#include <linux/pgtable_api.h>
+
+#ifdef CONFIG_MMU
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
+ * servicing faults for write access. In the normal case, do always want
+ * pte_mkwrite. But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_WRITE))
+ pte = pte_mkwrite(pte);
+ return pte;
+}
+
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+
+vm_fault_t finish_fault(struct vm_fault *vmf);
+vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
+#endif
+
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
+#ifdef CONFIG_MIGRATION
+static inline bool is_pinnable_page(struct page *page)
+{
+ return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
+ is_zero_pfn(page_to_pfn(page));
+}
+#else
+static inline bool is_pinnable_page(struct page *page)
+{
+ return true;
+}
+#endif
+
+#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
+ unsigned long address)
+{
+ return 0;
+}
+static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
+
+#else
+int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
+
+static inline void mm_inc_nr_puds(struct mm_struct *mm)
+{
+ if (mm_pud_folded(mm))
+ return;
+ atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
+}
+
+static inline void mm_dec_nr_puds(struct mm_struct *mm)
+{
+ if (mm_pud_folded(mm))
+ return;
+ atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
+}
+#endif
+
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ unsigned long address)
+{
+ return 0;
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+
+#else
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+{
+ if (mm_pmd_folded(mm))
+ return;
+ atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
+}
+
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+{
+ if (mm_pmd_folded(mm))
+ return;
+ atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
+}
+#endif
+
+#if defined(CONFIG_MMU)
+
+static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long address)
+{
+ return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
+ NULL : p4d_offset(pgd, address);
+}
+
+static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
+ unsigned long address)
+{
+ return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
+ NULL : pud_offset(p4d, address);
+}
+
+static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+ NULL: pmd_offset(pud, address);
+}
+#endif /* CONFIG_MMU */
+
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
+#endif /* _LINUX_MM_API_EXTRA_H */
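
A note on the include structure (my reading of the diff, not something the commit message spells out): <linux/mm_api_extra.h> includes <linux/mm_api.h> at its top, while <linux/mm_api.h> now includes the extra header at its very end. The circular include is harmless because both headers are guarded, and the extra header's definitions are only emitted after all of <linux/mm_api.h>'s own declarations have been seen. A standalone sketch of the same pattern, with hypothetical names:

/* foo_api.h (hypothetical) */
#ifndef _FOO_API_H
#define _FOO_API_H
int foo_common(void);		/* cheap API, included almost everywhere */
#include "foo_api_extra.h"	/* compatibility include, keeps old users building */
#endif

/* foo_api_extra.h (hypothetical) */
#ifndef _FOO_API_EXTRA_H
#define _FOO_API_EXTRA_H
#include "foo_api.h"		/* skipped via the guard if foo_api.h is already being parsed */
int foo_rare(void);		/* expensive API, only a few users */
#endif

Whichever header a .c file includes first, the guards ensure each declaration is seen exactly once and foo_common() is always declared before foo_rare().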