/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with the tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	arch=r4000				\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
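/*
 * A minimal usage sketch (illustrative only): writing back and
 * invalidating a single dcache line by virtual address is one
 * instruction,
 *
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *
 * The "R" constraint hands the byte at addr to the compiler as a
 * memory operand, so it builds a valid addressing mode for the
 * cache instruction itself.
 */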
#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protiflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_IPROT \
	if (mt_protiflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protdflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_DPROT \
	if (mt_protdflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */
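/*
 * The prologue/epilogue pairs below open and close a loop that repeats
 * each flush mt_n_iflushes (resp. mt_n_dflushes) times, optionally under
 * the single-threaded protection above. The braces are unbalanced within
 * each macro; they pair up across a prologue and its matching epilogue.
 */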
#define __iflush_prologue \
	unsigned long redundance; \
	extern int mt_n_iflushes; \
	BEGIN_MT_IPROT \
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
	END_MT_IPROT \
	}

#define __dflush_prologue \
	unsigned long redundance; \
	extern int mt_n_dflushes; \
	BEGIN_MT_DPROT \
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	arch=r4000		\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

#define protected_cachee_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))
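/*
 * Both protected variants rely on the kernel's exception-table fixup:
 * the __ex_table entry maps a fault at label 1b (the cache op itself)
 * to the continuation at label 2b, so a cache op on an unmapped address
 * is silently skipped instead of raising an unhandled exception. The
 * cachee form is the EVA variant, which operates on user-segment
 * addresses while in kernel mode.
 */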
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop, so we use Hit_Writeback_Inv_D, which is
 * supported by all R4000-style caches. The cost is one cacheline
 * unnecessarily getting invalidated, so the penalty isn't too severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific.
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

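/*
 * Each unroll macro issues 32 cache ops, so one invocation covers
 * 32 lines: 0x200 bytes at a 16-byte line size, 0x400 at 32, 0x800
 * at 64 and 0x1000 at 128 bytes.
 */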
#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
	:								\
	: "r" (base),							\
	  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

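/*
 * For example, the instantiation
 *
 *	__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
 *
 * below generates blast_dcache32(), blast_dcache32_page() and
 * blast_dcache32_page_indexed(); the loongson2_ instantiation prepends
 * its extra prefix to the generated names.
 */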
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

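/*
 * The instantiations below generate e.g. blast_dcache16_user_page()
 * and blast_icache64_user_page().
 */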
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

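/*
 * The instantiations below provide e.g. protected_blast_dcache_range()
 * and blast_inv_dcache_range(); empty prot/extra arguments simply drop
 * out of the generated name.
 */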
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (segment_eq(get_fs(), USER_DS)) {				\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
	__##pfx##flush_epilogue						\
}
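/*
 * Under EVA the protected range ops check the active address segment:
 * user addresses (segment_eq(get_fs(), USER_DS)) take the cachee path,
 * kernel addresses the ordinary cache path.
 */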

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */