From b638d0b921dc95229af0dfd09cd24850336a2f75 Mon Sep 17 00:00:00 2001
From: Richard Curnow
Date: Wed, 27 Sep 2006 14:09:26 +0900
Subject: sh: Optimized cache handling for SH-4/SH-4A caches.

This reworks some of the SH-4 cache handling code to more easily
accommodate newer-style caches (particularly for the > direct-mapped
case), as well as optimizing some of the old code.

Signed-off-by: Richard Curnow
Signed-off-by: Paul Mundt
---
 include/asm-sh/cache.h | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

(limited to 'include/asm-sh/cache.h')

diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 656fdfe9e8b445..33f13367054b89 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -23,15 +23,29 @@
 #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
 struct cache_info {
-	unsigned int ways;
-	unsigned int sets;
-	unsigned int linesz;
+	unsigned int ways;		/* Number of cache ways */
+	unsigned int sets;		/* Number of cache sets */
+	unsigned int linesz;		/* Cache line size (bytes) */
 
-	unsigned int way_incr;
+	unsigned int way_size;		/* sets * line size */
 
+	/*
+	 * way_incr is the address offset for accessing the next way
+	 * in memory mapped cache array ops.
+	 */
+	unsigned int way_incr;
 	unsigned int entry_shift;
 	unsigned int entry_mask;
 
+	/*
+	 * Compute a mask which selects the address bits which overlap between
+	 * 1. those used to select the cache set during indexing
+	 * 2. those in the physical page number.
+	 */
+	unsigned int alias_mask;
+
+	unsigned int n_aliases;		/* Number of aliases */
+
 	unsigned long flags;
 };
 
-- 
cgit 1.2.3-korg
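
Editor's note on the new alias fields (not part of the patch): the comment above alias_mask describes selecting the address bits shared by the cache-set index and the physical page number. The stand-alone C sketch below shows one plausible way alias_mask and n_aliases could be derived from the cache geometry; it assumes 4 KiB pages and SH-4-style sizes, and the struct name cache_geom and the helper compute_alias_info() are hypothetical illustrations, not the kernel's actual code.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1U << PAGE_SHIFT)

/* Hypothetical mirror of the geometry fields, for illustration only. */
struct cache_geom {
	unsigned int ways;
	unsigned int sets;
	unsigned int linesz;
	unsigned int way_size;		/* sets * line size */
	unsigned int alias_mask;
	unsigned int n_aliases;
};

/*
 * A single way spans way_size = sets * linesz bytes, so any index bits
 * at or above PAGE_SHIFT also belong to the page number and can create
 * virtual aliases.  alias_mask selects exactly those overlapping bits,
 * and n_aliases counts the distinct page "colours" they produce.
 */
static void compute_alias_info(struct cache_geom *c)
{
	c->way_size = c->sets * c->linesz;
	c->alias_mask = (c->way_size - 1) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
	/* Example numbers only: a 16 KiB, 1-way cache with 32-byte lines. */
	struct cache_geom dcache = { .ways = 1, .sets = 512, .linesz = 32 };

	compute_alias_info(&dcache);
	printf("way_size=%u alias_mask=0x%x n_aliases=%u\n",
	       dcache.way_size, dcache.alias_mask, dcache.n_aliases);
	return 0;
}

With these example numbers the program prints way_size=16384 alias_mask=0x3000 n_aliases=4, i.e. four possible page colours within a 16 KiB way when pages are 4 KiB.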