/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

extern void __weak sh4__flush_region_init(void);

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

/*
 * The following group of functions deals with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
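
/*
 * The two helpers above must be used as a strict pair: there is only the
 * one reserved slot, and IRQs stay disabled from setup until teardown, so
 * the temporary mapping cannot nest and cannot be preempted while live.
 */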

static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly
	   necessary). */
	local_irq_save(flags);

	/* Read-modify-write ICCR0 to set the invalidate bit, then synci.
	   Without %1 it gets inexplicably wrong. */
	__asm__ __volatile__ (
		"getcfg %3, 0, %0\n\t"
		"or %0, %2, %0\n\t"
		"putcfg %3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	   the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;
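	/* The (signed long long)(signed long) casts above sign-extend the
	   32-bit addresses into SH-5's 64-bit effective address space (cf.
	   the sign-extended magic address 0xffffffffec000000ULL below); a
	   plain unsigned conversion would zero-extend instead. */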

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
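		/* Four icbi's per pass, one per line, assuming 32-byte
		   I-cache lines (hence the offsets 0/32/64/96 and the
		   128-byte stride below). */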
		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache, option (2)
	   isn't available because there are no physical tags, so aliases
	   can't be resolved; the icbi instruction has to be used through the
	   user mapping.  Because icbi is cheaper than ocbp on a cache hit,
	   the selective code stays worthwhile up to a larger range than it
	   would for the D-cache.  Just assume 64 pages for now as a working
	   figure.
	*/
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
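		/* i.e. after_last_page_start == PAGE_ALIGN(end); e.g. with
		   4 KiB pages, end = 0x2001 gives 0x2000 + 0x1000 = 0x3000. */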

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			/* find_vma() returns the first vma with vm_end above
			   aligned_start, so a hole shows up as aligned_start
			   falling short of vm_start. */
			if (!vma || (aligned_start < vma->vm_start)) {
				/* Avoid getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active, in which case we might just
	   invalidate another process's I-cache entries: no worries, just a
	   performance drop for that process. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}

/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
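/* Assuming the usual SH-5 geometry (32-byte lines, 32 KiB operand cache),
   this works out to (32 << 10) + 4096 = 36 KiB, i.e. the full D-cache plus
   a page of slack, so allocoing across the buffer can touch every set and
   way at least once. */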

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed. */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;
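
	/*
	 * Worked example with hypothetical numbers: for 32-byte lines and
	 * 512 sets, entry_shift is 5 and entry_mask covers bits [13:5], so
	 * a buffer at 0x...1234 sits in set (0x1234 & 0x3fe0) >> 5 = 0x91.
	 */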

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
			cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
			cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address and the (2**n_synbits) pages above it aren't used
   for anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
						unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
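	/* CACHE_OC_SYN_MASK keeps the synonym ("colour") bits of eaddr, so
	   the alias indexes the same D-cache sets as the original user
	   mapping. */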

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
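	/* The caller doesn't tell us the page's colour, so it may be
	   resident under any of the (1 << CACHE_OC_N_SYNBITS) synonym
	   positions; purge through an alias at each one. */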
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons (-), comments (*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *      every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

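	/*
	 * Fall back to a full purge either for big ranges or when start and
	 * end - 1 lie under different PMD entries, since the selective path
	 * (sh64_dcache_purge_user_pages) walks only a single PTE page.
	 */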
	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
static void sh5_flush_cache_all(void *unused)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
static void sh5_flush_cache_mm(void *unused)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
static void sh5_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
static void sh5_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long eaddr, pfn;

	vma = data->vma;
	eaddr = data->addr1;
	pfn = data->addr2;

	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

static void sh5_flush_dcache_page(void *page)
{
	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
static void sh5_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;

	start = data->addr1;
	end = data->addr2;

	__flush_purge_region((void *)start, end - start); /* second arg is a size */
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process.  Used to flush signal trampolines on the stack to
 * make them executable.
 */
static void sh5_flush_cache_sigtramp(void *vaddr)
{
	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;

	__flush_wback_region(vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}

void __init sh5_cache_init(void)
{
	local_flush_cache_all		= sh5_flush_cache_all;
	local_flush_cache_mm		= sh5_flush_cache_mm;
	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
	local_flush_cache_page		= sh5_flush_cache_page;
	local_flush_cache_range		= sh5_flush_cache_range;
	local_flush_dcache_page		= sh5_flush_dcache_page;
	local_flush_icache_range	= sh5_flush_icache_range;
	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;

	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();

	sh4__flush_region_init();
}