#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
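
/*
 * For illustration, membar_safe("#StoreLoad") expands to the sequence
 * below.  Because the branch is always taken and predicted taken, the
 * membar can never sit in the shadow of a mispredicted branch, which is
 * exactly the situation Errata #51 describes:
 *
 *	ba,pt	%xcc, 1f	! branch always, predicted taken
 *	 membar	#StoreLoad	! barrier lives in the delay slot
 *	1:
 */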

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
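
/*
 * A minimal sketch (hypothetical flags, not part of this header) of why
 * only mb() emits a real instruction above: store-then-load is the one
 * reordering TSO still permits, so a Dekker-style handshake needs
 * #StoreLoad, while rmb() and wmb() only have to restrain the compiler.
 */
#if 0
static volatile int example_me, example_other;

static int example_try_enter(void)
{
	example_me = 1;		/* announce intent */
	mb();			/* order the store before the load below */
	return example_other;	/* without mb(), this load may pass the store */
}
#endif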
| 39 | |

#define dma_rmb()	rmb()
#define dma_wmb()	wmb()

#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
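
/*
 * Typical caller pattern, sketched after the usual sleep/wakeup idiom
 * ("condition" is a placeholder): the state store must reach memory
 * before the condition is sampled, or a concurrent waker can be missed.
 */
#if 0
	set_mb(current->state, TASK_INTERRUPTIBLE);
	if (!condition)
		schedule();
#endif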

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
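
/*
 * Message-passing sketch (hypothetical data/flag pair, not part of this
 * header): the release store keeps the payload write ahead of the flag
 * write, and the acquire load keeps the flag read ahead of the payload
 * read, which is all TSO plus a compiler barrier must guarantee here.
 */
#if 0
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;			/* write payload first */
	smp_store_release(&example_ready, 1);	/* then publish it */
}

static int example_consumer(void)
{
	if (smp_load_acquire(&example_ready))	/* observe the publish */
		return example_data;		/* payload is now valid */
	return -1;
}
#endif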

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
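
/*
 * Sketch of the intended pairing (hypothetical refcounted object): these
 * calls make a non-value-returning atomic op fully ordered, which under
 * the TSO assumption above costs no more than a compiler barrier.
 */
#if 0
	obj->dead = 1;			/* must be visible to other CPUs... */
	smp_mb__before_atomic();
	atomic_dec(&obj->refcount);	/* ...before the reference is dropped */
#endif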

#endif /* !(__SPARC64_BARRIER_H) */