#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

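/* PA-RISC has no cheap way to flush only the cache lines that belong to
 * a single mm, so flush_cache_mm() falls back to flushing everything:
 * all CPUs on SMP, just the local CPU otherwise. */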
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

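/* Flush the entire data and instruction cache; on SMP this is run on
 * every online CPU via on_each_cpu(). */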
static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);

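/* For small ranges a line-by-line flush is cheaper than wiping the whole
 * cache; parisc_cache_flush_threshold marks the crossover point and is
 * calibrated at boot by parisc_setup_cache_timing(). */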
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
}

extern void flush_dcache_page(struct page *page);

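/* flush_dcache_page() has to walk every user mapping of the page; these
 * take the mapping's tree_lock so the set of vmas cannot change
 * underneath it. */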
#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {			\
	flush_kernel_dcache_page(page);				\
	flush_kernel_icache_page(page_address(page));		\
} while (0)

#define flush_icache_range(s,e)		do {			\
	flush_kernel_dcache_range_asm(s,e);			\
	flush_kernel_icache_range_asm(s,e);			\
} while (0)

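/* Used by ptrace and friends when the kernel modifies a user page
 * through its own mapping: flush the user alias first, copy, then push
 * the new data out of the kernel's dcache alias. */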
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

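/* Flush a user virtual range.  If the range belongs to the currently
 * loaded address space (space id in %sr3) it can be flushed directly;
 * for any other mm we punt and flush everything. */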
static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* We cannot take the pte lock here: flush_cache_page is usually
	 * called with pte lock already held.  Whereas flush_dcache_page
	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
	 * the vma itself is secure, but the pte might come or go racily.
	 */
	pte = *pte_offset_map(pmd, addr);
	/* But pte_unmap() does nothing on this architecture */

	/* Filter out coincidental file entries and swap entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
		return 0;

	return pte_pfn(pte) == pfn;
}

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they are not
	 * allowed to touch user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr, pfn)))
		__flush_cache_page(vma, vmaddr);
}

static inline void
flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE

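/* Flush a page the kernel has written through its own (kernel) mapping,
 * e.g. after a PIO transfer, so that user-space aliases see the data. */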
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800 and pa8900 need this */
#define ARCH_HAS_KMAP

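/* kunmap_parisc() is expected to flush the kernel alias of the page
 * being unmapped; that flush is the reason these CPUs need real
 * kmap()/kunmap() hooks at all. */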
void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

#define kmap_atomic(page, idx)		page_address(page)

#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)

#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif

#endif /* _PARISC_CACHEFLUSH_H */