author		Ingo Molnar <mingo@kernel.org>	2021-12-13 12:54:28 +0100
committer	Ingo Molnar <mingo@kernel.org>	2022-03-15 12:57:36 +0100
commit		1850808076ba1de6e85e95b177b4ab4749ca5d5d (patch)
tree		14e40d155d93d1defdc34cdc870b994bcb99ffea
parent		b5668721e9d15796c4b63f00a05e1ada5cf54b0b (diff)
download	tip-1850808076ba1de6e85e95b177b4ab4749ca5d5d.tar.gz
headers/deps: mm: Introduce <linux/highmem_extra.h> for rarely used APIs
Move various rarely used APIs to the new header. The main <linux/highmem.h> header is included over 2,700 times in a typical distro kernel build, while the new header is used, directly or indirectly, by only ~130 .c files. The motivation is to reduce the header dependencies of <linux/highmem.h>.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
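For illustration only, a minimal sketch of the conversion on the user side (fill_page() is a hypothetical example user, not part of this patch): a .c file that calls one of the moved helpers, such as memcpy_to_page(), picks up the new include, while <linux/highmem_extra.h> itself still pulls in <linux/highmem.h> for the core kmap API:

	#include <linux/highmem_extra.h>	/* memcpy_to_page() and the other moved helpers */

	/* Hypothetical example user: copy a buffer into the start of a page. */
	static void fill_page(struct page *page, const char *buf, size_t len)
	{
		/* memcpy_to_page() maps the page, copies, flushes the dcache, unmaps */
		memcpy_to_page(page, 0, buf, len);
	}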
-rw-r--r--	include/linux/bvec_api.h	|   1 +
-rw-r--r--	include/linux/highmem.h		| 245 -
-rw-r--r--	include/linux/highmem_extra.h	| 251 +
3 files changed, 252 insertions(+), 245 deletions(-)
diff --git a/include/linux/bvec_api.h b/include/linux/bvec_api.h
index 91bc70df0fd354..75699845a3ed28 100644
--- a/include/linux/bvec_api.h
+++ b/include/linux/bvec_api.h
@@ -9,6 +9,7 @@
#include <linux/bvec_types.h>
+#include <linux/highmem_extra.h>
#include <linux/highmem.h>
/**
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index a6273310573cbc..6414a8c2959680 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -167,249 +167,4 @@ void *kmap_atomic(struct page *page);
* preemption.
*/
-/* Highmem related interfaces for management code */
-static inline unsigned int nr_free_highpages(void);
-static inline unsigned long totalhigh_pages(void);
-
-#ifndef ARCH_HAS_FLUSH_ANON_PAGE
-static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
-{
-}
-#endif
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-}
-#endif
-
-/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
-#ifndef clear_user_highpage
-static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *addr = kmap_local_page(page);
- clear_user_page(addr, vaddr, page);
- kunmap_local(addr);
-}
-#endif
-
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
- * implementation.
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
-{
- struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
-
- if (page)
- clear_user_highpage(page, vaddr);
-
- return page;
-}
-#endif
-
-static inline void clear_highpage(struct page *page)
-{
- void *kaddr = kmap_local_page(page);
- clear_page(kaddr);
- kunmap_local(kaddr);
-}
-
-#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
-
-static inline void tag_clear_highpage(struct page *page)
-{
-}
-
-#endif
-
-/*
- * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
- * If we pass in a head page, we can zero up to the size of the compound page.
- */
-#ifdef CONFIG_HIGHMEM
-void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
- unsigned start2, unsigned end2);
-#else
-static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
-{
- void *kaddr = kmap_local_page(page);
- unsigned int i;
-
- BUG_ON(end1 > page_size(page) || end2 > page_size(page));
-
- if (end1 > start1)
- memset(kaddr + start1, 0, end1 - start1);
-
- if (end2 > start2)
- memset(kaddr + start2, 0, end2 - start2);
-
- kunmap_local(kaddr);
- for (i = 0; i < compound_nr(page); i++)
- flush_dcache_page(page + i);
-}
-#endif
-
-static inline void zero_user_segment(struct page *page,
- unsigned start, unsigned end)
-{
- zero_user_segments(page, start, end, 0, 0);
-}
-
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
-{
- zero_user_segments(page, start, start + size, 0, 0);
-}
-
-#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
-
-static inline void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- char *vfrom, *vto;
-
- vfrom = kmap_local_page(from);
- vto = kmap_local_page(to);
- copy_user_page(vto, vfrom, vaddr, to);
- kunmap_local(vto);
- kunmap_local(vfrom);
-}
-
-#endif
-
-#ifndef __HAVE_ARCH_COPY_HIGHPAGE
-
-static inline void copy_highpage(struct page *to, struct page *from)
-{
- char *vfrom, *vto;
-
- vfrom = kmap_local_page(from);
- vto = kmap_local_page(to);
- copy_page(vto, vfrom);
- kunmap_local(vto);
- kunmap_local(vfrom);
-}
-
-#endif
-
-static inline void memcpy_page(struct page *dst_page, size_t dst_off,
- struct page *src_page, size_t src_off,
- size_t len)
-{
- char *dst = kmap_local_page(dst_page);
- char *src = kmap_local_page(src_page);
-
- VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
- memcpy(dst + dst_off, src + src_off, len);
- kunmap_local(src);
- kunmap_local(dst);
-}
-
-static inline void memmove_page(struct page *dst_page, size_t dst_off,
- struct page *src_page, size_t src_off,
- size_t len)
-{
- char *dst = kmap_local_page(dst_page);
- char *src = kmap_local_page(src_page);
-
- VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
- memmove(dst + dst_off, src + src_off, len);
- kunmap_local(src);
- kunmap_local(dst);
-}
-
-static inline void memset_page(struct page *page, size_t offset, int val,
- size_t len)
-{
- char *addr = kmap_local_page(page);
-
- VM_BUG_ON(offset + len > PAGE_SIZE);
- memset(addr + offset, val, len);
- kunmap_local(addr);
-}
-
-static inline void memcpy_from_page(char *to, struct page *page,
- size_t offset, size_t len)
-{
- char *from = kmap_local_page(page);
-
- VM_BUG_ON(offset + len > PAGE_SIZE);
- memcpy(to, from + offset, len);
- kunmap_local(from);
-}
-
-static inline void memcpy_to_page(struct page *page, size_t offset,
- const char *from, size_t len)
-{
- char *to = kmap_local_page(page);
-
- VM_BUG_ON(offset + len > PAGE_SIZE);
- memcpy(to + offset, from, len);
- flush_dcache_page(page);
- kunmap_local(to);
-}
-
-static inline void memzero_page(struct page *page, size_t offset, size_t len)
-{
- char *addr = kmap_local_page(page);
- memset(addr + offset, 0, len);
- flush_dcache_page(page);
- kunmap_local(addr);
-}
-
-/**
- * folio_zero_segments() - Zero two byte ranges in a folio.
- * @folio: The folio to write to.
- * @start1: The first byte to zero.
- * @xend1: One more than the last byte in the first range.
- * @start2: The first byte to zero in the second range.
- * @xend2: One more than the last byte in the second range.
- */
-static inline void folio_zero_segments(struct folio *folio,
- size_t start1, size_t xend1, size_t start2, size_t xend2)
-{
- zero_user_segments(&folio->page, start1, xend1, start2, xend2);
-}
-
-/**
- * folio_zero_segment() - Zero a byte range in a folio.
- * @folio: The folio to write to.
- * @start: The first byte to zero.
- * @xend: One more than the last byte to zero.
- */
-static inline void folio_zero_segment(struct folio *folio,
- size_t start, size_t xend)
-{
- zero_user_segments(&folio->page, start, xend, 0, 0);
-}
-
-/**
- * folio_zero_range() - Zero a byte range in a folio.
- * @folio: The folio to write to.
- * @start: The first byte to zero.
- * @length: The number of bytes to zero.
- */
-static inline void folio_zero_range(struct folio *folio,
- size_t start, size_t length)
-{
- zero_user_segments(&folio->page, start, start + length, 0, 0);
-}
-
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/highmem_extra.h b/include/linux/highmem_extra.h
index 9a515c9a213137..9b9e1d8059b553 100644
--- a/include/linux/highmem_extra.h
+++ b/include/linux/highmem_extra.h
@@ -1 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HIGHMEM_EXTRA_H
+#define _LINUX_HIGHMEM_EXTRA_H
+
#include <linux/highmem.h>
+
+/* Highmem related interfaces for management code */
+static inline unsigned int nr_free_highpages(void);
+static inline unsigned long totalhigh_pages(void);
+
+#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+}
+#endif
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
+#endif
+
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+#ifndef clear_user_highpage
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_local_page(page);
+ clear_user_page(addr, vaddr, page);
+ kunmap_local(addr);
+}
+#endif
+
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+/**
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or reclaimed
+ *
+ * An architecture may override this function by defining
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
+ * implementation.
+ */
+static inline struct page *
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
+
+ if (page)
+ clear_user_highpage(page, vaddr);
+
+ return page;
+}
+#endif
+
+static inline void clear_highpage(struct page *page)
+{
+ void *kaddr = kmap_local_page(page);
+ clear_page(kaddr);
+ kunmap_local(kaddr);
+}
+
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+
+static inline void tag_clear_highpage(struct page *page)
+{
+}
+
+#endif
+
+/*
+ * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
+ * If we pass in a head page, we can zero up to the size of the compound page.
+ */
+#ifdef CONFIG_HIGHMEM
+void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2);
+#else
+static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+{
+ void *kaddr = kmap_local_page(page);
+ unsigned int i;
+
+ BUG_ON(end1 > page_size(page) || end2 > page_size(page));
+
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
+
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
+
+ kunmap_local(kaddr);
+ for (i = 0; i < compound_nr(page); i++)
+ flush_dcache_page(page + i);
+}
+#endif
+
+static inline void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
+{
+ zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+ unsigned start, unsigned size)
+{
+ zero_user_segments(page, start, start + size, 0, 0);
+}
+
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+static inline void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
+
+#endif
+
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_page(vto, vfrom);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
+
+#endif
+
+static inline void memcpy_page(struct page *dst_page, size_t dst_off,
+ struct page *src_page, size_t src_off,
+ size_t len)
+{
+ char *dst = kmap_local_page(dst_page);
+ char *src = kmap_local_page(src_page);
+
+ VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
+ memcpy(dst + dst_off, src + src_off, len);
+ kunmap_local(src);
+ kunmap_local(dst);
+}
+
+static inline void memmove_page(struct page *dst_page, size_t dst_off,
+ struct page *src_page, size_t src_off,
+ size_t len)
+{
+ char *dst = kmap_local_page(dst_page);
+ char *src = kmap_local_page(src_page);
+
+ VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
+ memmove(dst + dst_off, src + src_off, len);
+ kunmap_local(src);
+ kunmap_local(dst);
+}
+
+static inline void memset_page(struct page *page, size_t offset, int val,
+ size_t len)
+{
+ char *addr = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memset(addr + offset, val, len);
+ kunmap_local(addr);
+}
+
+static inline void memcpy_from_page(char *to, struct page *page,
+ size_t offset, size_t len)
+{
+ char *from = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to, from + offset, len);
+ kunmap_local(from);
+}
+
+static inline void memcpy_to_page(struct page *page, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
+ memcpy(to + offset, from, len);
+ flush_dcache_page(page);
+ kunmap_local(to);
+}
+
+static inline void memzero_page(struct page *page, size_t offset, size_t len)
+{
+ char *addr = kmap_local_page(page);
+ memset(addr + offset, 0, len);
+ flush_dcache_page(page);
+ kunmap_local(addr);
+}
+
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
+{
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
+#endif /* _LINUX_HIGHMEM_EXTRA_H */
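
As a closing usage sketch (zero_folio_tail() is a hypothetical helper, not part of this patch, and folio_size() is assumed available from <linux/mm.h>): the folio_zero_*() wrappers moved above all funnel into zero_user_segments(), so zeroing the unused tail of a folio takes a single call through the new header:

	#include <linux/highmem_extra.h>	/* folio_zero_range() */

	/* Hypothetical: zero everything past the first 'valid' bytes of a folio. */
	static void zero_folio_tail(struct folio *folio, size_t valid)
	{
		folio_zero_range(folio, valid, folio_size(folio) - valid);
	}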