From 1ada8088b43e71f7da2b9032172dad4841d4a9be Mon Sep 17 00:00:00 2001
From: Kevin Hao <kexin.hao@windriver.com>
Date: Tue, 2 Mar 2010 16:51:57 -0500
Subject: [PATCH] powerpc: Replace kmap_atomic with kmap in pte_offset_map

commit 1268870be30e4571b0e007c986231c434c3b4912 in tip.

The pte_offset_map/pte_offset_map_nested macros use kmap_atomic to get
the virtual address of the pte table, but kmap_atomic also disables
preemption.  Since spinlocks become sleeping locks on preempt-rt, we get
a call trace if we acquire a spinlock after invoking
pte_offset_map/pte_offset_map_nested.  To fix this, I've replaced
kmap_atomic with kmap in these macros.
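
For illustration, a minimal sketch of the failing pattern (a
hypothetical caller, not part of this patch), assuming a preempt-rt
kernel where spinlocks are sleeping locks:

	pte_t *pte = pte_offset_map(pmd, addr); /* kmap_atomic: preemption off */
	spin_lock(ptl);     /* may sleep on -rt: "scheduling while atomic" */
	/* ... inspect or modify the pte ... */
	spin_unlock(ptl);
	pte_unmap(pte);     /* kunmap_atomic: preemption back on */

With kmap instead of kmap_atomic, preemption stays enabled across the
mapping, so taking the sleeping spinlock is safe.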

Signed-off-by: Kevin Hao <kexin.hao@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
LKML-Reference: <ffaf532c138188b526a8c623ed3c7f5067da6d68.1267566249.git.paul.gortmaker@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/powerpc/include/asm/pgtable-ppc32.h |   12 ++++++++++++
 1 files changed, 12 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 55646ad..a838099 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -307,6 +307,17 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#ifdef CONFIG_PREEMPT_RT
+#define pte_offset_map(dir, addr)		\
+	((pte_t *) kmap(pmd_page(*(dir))) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr)	\
+	((pte_t *) kmap(pmd_page(*(dir))) + pte_index(addr))
+
+#define pte_unmap(pte)	\
+	kunmap((struct page *)_ALIGN_DOWN((unsigned int)pte, PAGE_SIZE))
+#define pte_unmap_nested(pte)	\
+	kunmap((struct page *)_ALIGN_DOWN((unsigned int)pte, PAGE_SIZE))
+#else
 #define pte_offset_map(dir, addr)		\
 	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
 #define pte_offset_map_nested(dir, addr)	\
@@ -314,6 +325,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 
 #define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#endif
 
 /*
  * Encode and decode a swap entry.
-- 
1.7.0.4