/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
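
/*
 * Illustrative sketch of the full barriers above in a classic
 * producer/consumer pairing; the variable names (data, flag) are
 * hypothetical and only meant to show the ordering they provide.
 *
 *	CPU 0				CPU 1
 *	data = 42;			while (!flag)
 *	wmb();					;
 *	flag = 1;			rmb();
 *					r = data;
 *
 * The wmb() orders the store to data before the store to flag, and the
 * rmb() orders the load of flag before the load of data, so once CPU 1
 * observes flag == 1 it also observes data == 42.
 */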

#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

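/*
 * Illustrative sketch of dma_wmb(): it orders writes to coherent DMA
 * memory, e.g. filling in a descriptor before handing it to a device.
 * The descriptor layout and names below are hypothetical.
 *
 *	desc->addr = buf_dma_addr;
 *	desc->len  = buf_len;
 *	dma_wmb();
 *	desc->status = DESC_OWNED_BY_HW;
 *
 * Ringing a doorbell register afterwards is a separate MMIO access,
 * ordered by the MMIO accessors or an explicit barrier.
 */
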
#ifdef CONFIG_SMP
#define smp_lwsync()	__lwsync()

#define smp_mb()	mb()
#define smp_rmb()	__lwsync()
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#else
#define smp_lwsync()	barrier()

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif /* CONFIG_SMP */

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known. For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

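/*
 * Illustrative sketch of data_barrier(): it can be used after a load
 * whose result must be resolved before any later access is started.
 * The names below are hypothetical.
 *
 *	idx = table->current_index;	// load from memory
 *	data_barrier(idx);		// stall until idx is known
 *	val = ring[idx];		// not started before the load completes
 *
 * The twi 0,%0,0 is a trap that never fires but consumes the value, and
 * the isync prevents later instructions from starting until the preceding
 * ones (including the trap, and hence the load) have completed.
 */
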
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_lwsync();							\
	___p1;								\
})
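
/*
 * Illustrative sketch of the acquire/release pair above in the usual
 * message-passing pattern; the names (msg, ready) are hypothetical.
 *
 *	CPU 0					CPU 1
 *	msg.payload = 42;			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			r = msg.payload;
 *
 * The smp_lwsync() before the store and after the load provides the
 * release/acquire ordering for cacheable memory on SMP, so a reader that
 * observes ready == 1 also observes the payload written before it.
 */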

#define smp_mb__before_atomic()		smp_mb()
#define smp_mb__after_atomic()		smp_mb()
#define smp_mb__before_spinlock()	smp_mb()

#endif /* _ASM_POWERPC_BARRIER_H */