/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, from the a.out loader,
 * and from signal handler and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long icacheaddr;
	unsigned long start, end;
	unsigned long v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(args);
	} else {
		/*
		 * Selectively flush the D-cache and then invalidate the
		 * I-cache; this is inefficient, so only use it for small
		 * ranges.
		 */
		start &= ~(L1_CACHE_BYTES - 1);
		end += L1_CACHE_BYTES - 1;
		end &= ~(L1_CACHE_BYTES - 1);

		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
			__ocbwb(v);

			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
					(v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
			     i++, icacheaddr += cpu_data->icache.way_incr)
				/* Clear i-cache line valid-bit */
				ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
	}
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
#ifndef CONFIG_SMP
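	/*
	 * A page that belongs to a mapping but is not currently mapped
	 * into user space can have its flush deferred on UP: mark it
	 * PG_dcache_dirty and leave the flush until a user mapping is
	 * established.
	 */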
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long ccr;

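	/* CCR is only safely written with the PC in the uncached P2 area, hence the jump */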
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
}

static inline void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

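	/*
	 * Walk the page tables for the range, recording in 'd' each cache
	 * colour (alias) touched by either the virtual or the physical
	 * address of a present page.  Once every colour has been seen
	 * there is nothing further to learn, so the walk bails out early.
	 */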
	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long address, pfn, phys;
	unsigned int alias_mask;

	vma = data->vma;
	address = data->addr1;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

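	/*
	 * Each store below writes 'phys' into the memory-mapped cache
	 * address array; the cache responds by writing back/invalidating
	 * any line whose tag matches, as described in the comment block
	 * above.  The outer loop walks every way, the inner loop every
	 * line of the 4K extent.
	 */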
	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
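	/* Bit 28 of SR is BL: each movca.l/ocbi group below runs with exceptions blocked */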
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
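	/*
	 * movca.l allocates a cache line for the zero-page address without
	 * fetching it from memory, which forces write-back of any dirty
	 * line previously occupying that slot; the following ocbi then
	 * invalidates the freshly allocated line so the dummy data is
	 * never written out.
	 */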
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
	       ctrl_inl(CCN_PVR),
	       ctrl_inl(CCN_CVR),
	       ctrl_inl(CCN_PRR));

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		panic("unknown number of cache ways\n");
		break;
	}

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}