#ifndef _ASM_METAG_BARRIER_H
#define _ASM_METAG_BARRIER_H

#include <asm/metag_mem.h>

#define nop()		asm volatile ("NOP")
#define mb()		wmb()
#define rmb()		barrier()
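/*
 * Note that mb() is a function-like macro, so it expands to wmb() at each
 * point of use; the reference to the static inline wmb() defined later in
 * this header therefore resolves correctly.
 */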
#ifdef CONFIG_METAG_META21

/* HTP and above have a system event to fence writes */
static inline void wr_fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
	barrier();
	*flushptr = 0;
}

#else /* CONFIG_METAG_META21 */

/*
 * ATP doesn't have a system event to fence writes, so it is necessary to
 * flush the processor write queues as well as possibly the write combiner
 * (depending on the page being written). To ensure the write queues are
 * flushed we do 4 writes to a system event register (in this case the write
 * combiner flush), which will also flush the write combiner.
 */
static inline void wr_fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
	barrier();
	*flushptr = 0;
	*flushptr = 0;
	*flushptr = 0;
	*flushptr = 0;
}

#endif /* !CONFIG_METAG_META21 */

static inline void wmb(void)
{
	/* flush writes through the write combiner */
	wr_fence();
}
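/*
 * Example: the typical use of wmb() is to publish memory writes before the
 * MMIO write that tells a device to look at them (desc, doorbell, buf_phys
 * and buf_len below are hypothetical names):
 *
 *	desc->addr = buf_phys;
 *	desc->len  = buf_len;
 *	wmb();			<-- descriptor visible before the doorbell
 *	writel(1, doorbell);
 */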

#define read_barrier_depends()	do { } while (0)

#ifndef CONFIG_SMP
#define fence()		do { } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else

#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
/*
 * Write to the atomic memory unlock system event register (command 0). This
 * is needed before a write to shared memory in a critical section, to prevent
 * external reordering of writes before the fence on other threads with writes
 * after the fence on this thread (and to prevent the ensuing cache-memory
 * incoherence). It is therefore ineffective if used after, and on the same
 * thread as, a write.
 */
static inline void fence(void)
{
	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
	barrier();
	*flushptr = 0;
}
#define smp_mb()	fence()
#define smp_rmb()	fence()
#define smp_wmb()	barrier()
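/*
 * Sketch of the intended use (lock and shared are hypothetical names):
 * fence() is issued ahead of a shared-memory write inside a critical
 * section, per the comment above, so another thread's pre-fence writes
 * cannot be externally reordered with this thread's post-fence writes:
 *
 *	spin_lock(&lock);
 *	fence();		<-- what smp_mb()/smp_rmb() expand to here
 *	shared->owner = me;
 *	spin_unlock(&lock);
 */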
#else
#define fence()		do { } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif /* CONFIG_METAG_SMP_WRITE_REORDERING */
#endif /* !CONFIG_SMP */
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})
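/*
 * Example pairing (obj and ready are hypothetical names): a producer
 * publishes data with smp_store_release() and a consumer observes it with
 * smp_load_acquire(); if the consumer sees the flag, it is guaranteed to
 * also see the data written before the release.
 *
 *	CPU 0				CPU 1
 *	obj->val = 42;			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		use(obj->val);
 */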

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()

#endif /* _ASM_METAG_BARRIER_H */