author		Paul Mundt <lethal@linux-sh.org>	2009-08-21 18:21:07 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-21 18:21:07 +0900
commit		64a6d72213dd810dd55bd0a503c36150af41c3c3 (patch)
tree		81f2f6e66d3a38f5cb7a27f0a85b365b25469fe4 /arch/sh/mm/cache-sh4.c
parent		f26b2a562b46ab186c8383993ab1332673ac4a47 (diff)
download	linux-64a6d72213dd810dd55bd0a503c36150af41c3c3.tar.gz
sh: Kill off now redundant local irq disabling.

on_each_cpu() takes care of IRQ and preempt handling; the localized
handling in each of the called functions can be killed off.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
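For context, a minimal sketch of the dispatch this relies on (the wrapper name below is hypothetical; the real call sites live outside this file): on_each_cpu() disables preemption, runs the callback on the other CPUs from the IPI handler, and invokes it on the local CPU with interrupts disabled, which is exactly the protection the removed local_irq_save()/local_irq_restore() pairs used to provide.

	/*
	 * Hypothetical wrapper, for illustration only: how a flusher
	 * such as sh4_flush_icache_range() is expected to be driven.
	 * on_each_cpu() already provides IRQ/preempt protection, so
	 * the callback needs no local_irq_save()/local_irq_restore().
	 */
	static void flush_icache_range_sketch(unsigned long start,
					      unsigned long end)
	{
		struct flusher_data data = {
			.addr1 = start,	/* consumed as data->addr1 below */
			.addr2 = end,	/* consumed as data->addr2 below */
		};

		/* wait == 1: return only once every CPU has flushed */
		on_each_cpu(sh4_flush_icache_range, &data, 1);
	}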
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--	arch/sh/mm/cache-sh4.c	61
1 file changed, 26 insertions, 35 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 9201b37c7cca7d..e3b77f0fa47035 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -48,48 +48,44 @@ static void sh4_flush_icache_range(void *args)
 	struct flusher_data *data = args;
 	int icacheaddr;
 	unsigned long start, end;
-	unsigned long flags, v;
+	unsigned long v;
 	int i;
 
 	start = data->addr1;
 	end = data->addr2;
 
-	/* If there are too many pages then just blow the caches */
-	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		local_flush_cache_all(args);
-	} else {
-		/* selectively flush d-cache then invalidate the i-cache */
-		/* this is inefficient, so only use for small ranges */
-		start &= ~(L1_CACHE_BYTES-1);
-		end += L1_CACHE_BYTES-1;
-		end &= ~(L1_CACHE_BYTES-1);
-
-		local_irq_save(flags);
-		jump_to_uncached();
-
-		for (v = start; v < end; v+=L1_CACHE_BYTES) {
-			asm volatile("ocbwb	%0"
-				     : /* no output */
-				     : "m" (__m(v)));
-
-			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
-					v & cpu_data->icache.entry_mask);
-
-			for (i = 0; i < cpu_data->icache.ways;
-				i++, icacheaddr += cpu_data->icache.way_incr)
-					/* Clear i-cache line valid-bit */
-					ctrl_outl(0, icacheaddr);
-		}
+	/* If there are too many pages then just blow the caches */
+	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
+		local_flush_cache_all(args);
+	} else {
+		/* selectively flush d-cache then invalidate the i-cache */
+		/* this is inefficient, so only use for small ranges */
+		start &= ~(L1_CACHE_BYTES-1);
+		end += L1_CACHE_BYTES-1;
+		end &= ~(L1_CACHE_BYTES-1);
+
+		jump_to_uncached();
+
+		for (v = start; v < end; v+=L1_CACHE_BYTES) {
+			__ocbwb(v);
+
+			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
+					(v & cpu_data->icache.entry_mask);
+
+			for (i = 0; i < cpu_data->icache.ways;
+				i++, icacheaddr += cpu_data->icache.way_incr)
+					/* Clear i-cache line valid-bit */
+					ctrl_outl(0, icacheaddr);
+		}
 
 		back_to_cached();
-		local_irq_restore(flags);
 	}
 }
 
 static inline void flush_cache_4096(unsigned long start,
 				    unsigned long phys)
 {
-	unsigned long flags, exec_offset = 0;
+	unsigned long exec_offset = 0;
 
 	/*
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
@@ -99,10 +95,8 @@ static inline void flush_cache_4096(unsigned long start,
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
-	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
 			   P1SEGADDR(phys), exec_offset);
-	local_irq_restore(flags);
 }
 
 /*
@@ -135,9 +129,8 @@ static void sh4_flush_dcache_page(void *page)
 /* TODO: Selective icache invalidation through IC address array.. */
 static void __uses_jump_to_uncached flush_icache_all(void)
 {
-	unsigned long flags, ccr;
+	unsigned long ccr;
 
-	local_irq_save(flags);
 	jump_to_uncached();
 
 	/* Flush I-cache */
@@ -149,9 +142,7 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
-
 	back_to_cached();
-	local_irq_restore(flags);
 }
 
 static inline void flush_dcache_all(void)
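A side note on what the patch deliberately keeps: as the comments in the hunks above explain, all SH-4 parts require the PC to be executing from the uncached P2 segment while the I-cache or CCR is operated on, and back_to_cached() already supplies the trailing barrier. The bracketing pattern, sketched in isolation (illustrative fragment, not part of the patch):

	jump_to_uncached();		/* keep executing, but from the uncached P2 alias */

	ctrl_outl(0, icacheaddr);	/* safe here: e.g. clearing an I-cache valid bit */

	back_to_cached();		/* return to the cached mapping; also the barrier */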