author     Nicholas Piggin <npiggin@gmail.com>       2021-12-16 20:33:42 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>     2021-12-20 12:13:32 +1100
commit     467ba14e1660b52a2f9338b484704c461bd23019 (patch)
tree       7c68c13ec4aa1c400f4561713f63f4774c30ff28 /arch/powerpc/mm/pgtable_64.c
parent     a605b39e8ef703828b9e26750ea1925a6a5ef848 (diff)
powerpc/64s/radix: Fix huge vmap false positive
pmd_huge() is defined to false when HUGETLB_PAGE is not configured, but the vmap code still installs huge PMDs. This leads to false bad PMD errors when vunmapping because the entry is not seen as a huge PTE, and the bad PMD check catches it. The end result may not be much more serious than some bad pmd warning messages, because pmd_none_or_clear_bad() does what we wanted and clears the huge PTE anyway.

Fix this by checking pmd_is_leaf(), which checks for a PTE regardless of config options. The whole huge/large/leaf stuff is a tangled mess but that's kernel-wide and not something we can improve much in arch/powerpc code.

pmd_page(), pud_page(), etc., called by vmalloc_to_page() on huge vmaps, can similarly trigger a false VM_BUG_ON when CONFIG_HUGETLB_PAGE=n, so those checks are adjusted too. The checks were added by commit d6eacedd1f0e ("powerpc/book3s: Use config independent helpers for page table walk"), which implemented a similar fix for other page table walking functions.

Fixes: d909f9109c30 ("powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP")
Cc: stable@vger.kernel.org # v5.3+
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211216103342.609192-1-npiggin@gmail.com
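For context, the config dependence behind the fix looks roughly like this. The snippets below are a paraphrased sketch of the helpers as they stood around v5.16 (include/linux/hugetlb.h and arch/powerpc/include/asm/book3s/64/pgtable.h), not verbatim source:

/*
 * Paraphrased sketch, not verbatim kernel source. With
 * CONFIG_HUGETLB_PAGE=n, the generic hugetlb header stubs the
 * predicates out to constant false:
 */
#ifndef CONFIG_HUGETLB_PAGE
#define pmd_huge(x)	0
#define pud_huge(x)	0
#endif

/*
 * By contrast, the Book3S 64 pmd_is_leaf() tests the _PAGE_PTE bit
 * carried in the entry itself, so a huge vmap PMD is recognised as a
 * leaf PTE no matter how hugetlbfs is configured:
 */
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}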
Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 78c8cf01db5f9..175aabf101e87 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
struct page *p4d_page(p4d_t p4d)
{
if (p4d_is_leaf(p4d)) {
- VM_WARN_ON(!p4d_huge(p4d));
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!p4d_huge(p4d));
return pte_page(p4d_pte(p4d));
}
return virt_to_page(p4d_pgtable(p4d));
@@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d)
struct page *pud_page(pud_t pud)
{
if (pud_is_leaf(pud)) {
- VM_WARN_ON(!pud_huge(pud));
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
}
return virt_to_page(pud_pgtable(pud));
@@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud)
struct page *pmd_page(pmd_t pmd)
{
if (pmd_is_leaf(pmd)) {
- VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+ /*
+ * vmalloc_to_page may be called on any vmap address (not only
+ * vmalloc), and it uses pmd_page() etc., when huge vmap is
+ * enabled so these checks can't be used.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
}
return virt_to_page(pmd_page_vaddr(pmd));
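For reference, the path that trips these warnings is vmalloc_to_page(), which with huge vmap enabled returns early through p4d_page()/pud_page()/pmd_page() on leaf entries. Below is an abridged sketch of that walk, condensed from mm/vmalloc.c of this era with some debug checks trimmed; it shows why those helpers run even with CONFIG_HUGETLB_PAGE=n and so must not warn on !p*d_huge():

/*
 * Abridged sketch of mm/vmalloc.c:vmalloc_to_page() around v5.16,
 * not a verbatim copy; pgd_bad()/p*d_bad() debug checks are trimmed.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long)vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))	/* huge vmap entry at the p4d level */
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))	/* huge vmap entry at the pud level */
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))	/* huge vmap entry at the pmd level */
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

	/* Regular small-page mapping: descend to the PTE level. */
	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}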