/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

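/*
 * __fast_iob() performs an uncached load from KSEG1 and discards the
 * result ($0); the intent is that the load cannot complete until
 * earlier uncached writes have been pushed out to the bus, which is
 * what makes it usable as an I/O barrier on most systems.
 */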
#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */
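
/*
 * Usage sketch (illustration only, not part of this header; the device
 * register below is hypothetical): after storing to a memory-mapped
 * control register, iob() keeps the CPU from racing ahead of the
 * posted write.
 *
 * <programlisting>
 *	*dev_ctrl_reg = 1;		start the device
 *	iob();				do not proceed until the write
 *					has reached the bus
 * </programlisting>
 */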

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif
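
/*
 * Usage sketch (illustration only; "obj" and "global_ptr" are
 * hypothetical): publishing a newly initialised object through a
 * shared pointer.  The writer orders its stores with smp_wmb(); the
 * reader pairs the dependent load with smp_read_barrier_depends().
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	obj->data = 42;
 *	smp_wmb();
 *	global_ptr = obj;		p = global_ptr;
 *					smp_read_barrier_depends();
 *					d = p->data;
 * </programlisting>
 */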

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
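
/*
 * Usage sketch (illustration only; "work_pending" is hypothetical):
 * set_mb() is a plain store followed by a full barrier, e.g. raising a
 * flag that another CPU polls:
 *
 * <programlisting>
 *	set_mb(work_pending, 1);	the store is ordered before any
 *					later references by this CPU
 * </programlisting>
 */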

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next, next->thread_info); \
} while(0)

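/*
 * ROT_IN_PIECES toggles the assembler's reorder mode off and straight
 * back on; it is emitted right after the branch-likely instructions in
 * the sequences below and appears to exist only to placate the
 * assembler about their delay slots.
 */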
#define ROT_IN_PIECES							\
	"	.set	noreorder	\n"				\
	"	.set	reorder		\n"

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	move	%2, %z4					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	move	%2, %z4					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}

#ifdef CONFIG_MIPS64
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr)	(xchg((ptr),1))
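
/*
 * Usage sketch (illustration only; "my_lock" is hypothetical): tas()
 * atomically stores 1 and returns the previous value, so a simple
 * test-and-set lock can spin until it observes the old value 0.
 *
 * <programlisting>
 *	while (tas(&my_lock))
 *		;			spin until we flipped 0 -> 1
 *	...critical section...
 *	my_lock = 0;			release (real code would issue a
 *					barrier before this store)
 * </programlisting>
 */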

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}

#ifdef CONFIG_MIPS64
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		ROT_IN_PIECES
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	noat					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	at					\n"
		: "=&r" (retval), "=m" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new), sizeof(*(ptr))))
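
/*
 * Usage sketch (illustration only; "counter" is hypothetical): the
 * classic compare-and-swap retry loop, updating a shared word only if
 * nobody else changed it in the meantime.
 *
 * <programlisting>
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 * </programlisting>
 */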

extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern NORET_TYPE void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs) \
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs) \
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */