#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
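/*
 * Illustrative sketch, not part of this header's definitions: on 32-bit
 * CPUs without SSE2 the ALTERNATIVE() above patches in a locked add to
 * the stack, which is a full barrier and therefore a safe stand-in for
 * the m/l/sfence instructions.  The mandatory barriers order accesses
 * against all observers, devices included.  With hypothetical buf/len
 * variables:
 *
 *	buf[0] = 1;
 *	buf[1] = 2;
 *	wmb();			// make the payload visible ...
 *	WRITE_ONCE(len, 2);	// ... before publishing its length
 *
 * and on the reading side:
 *
 *	if (READ_ONCE(len)) {
 *		rmb();		// read len before reading buf[]
 *		consume(buf[0], buf[1]);	// consume() is made up
 *	}
 */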

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
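/*
 * Illustrative sketch: dma_rmb()/dma_wmb() order CPU accesses to memory
 * shared coherently with a DMA-capable device, more cheaply than a full
 * mb().  A hypothetical descriptor ring (desc, DEVICE_OWN and the field
 * names are made up for the example) might be handled like this:
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// see status before data
 *		read_data = desc->data;
 *		desc->data = new_data;
 *		dma_wmb();			// data written before we
 *		desc->status = DEVICE_OWN;	// hand the descriptor back
 *	}
 */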

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
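/*
 * Illustrative sketch: __smp_store_mb() backs smp_store_mb() (wired up
 * by asm-generic/barrier.h, included below), a store followed by a full
 * barrier; xchg works because its implicit lock prefix is serializing.
 * The full barrier matters since TSO still allows a store to be
 * reordered after a later load.  With hypothetical X and Y, both
 * initially zero:
 *
 *	CPU 0				CPU 1
 *	smp_store_mb(X, 1);		smp_store_mb(Y, 1);
 *	r0 = READ_ONCE(Y);		r1 = READ_ONCE(X);
 *
 *	BUG_ON(r0 == 0 && r1 == 0);	// forbidden with the barriers
 */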

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option (the Pentium Pro memory-ordering errata workaround)
 * x86 can't be assumed to have a strong TSO memory model and we should
 * fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
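/*
 * Illustrative sketch: asm-generic/barrier.h, included below, builds the
 * public smp_store_release()/smp_load_acquire() from the __smp_* macros
 * above.  On TSO x86 a compiler barrier() is enough because the hardware
 * never reorders store-store, load-load or load-store accesses to normal
 * memory.  A hypothetical message-passing pair, data and ready both
 * initially zero:
 *
 *	producer:
 *		WRITE_ONCE(data, 42);
 *		smp_store_release(&ready, 1);	// publish data
 *
 *	consumer:
 *		if (smp_load_acquire(&ready))	// pairs with the release
 *			BUG_ON(data != 42);	// guaranteed to see 42
 */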

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
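/*
 * Illustrative sketch: the public smp_mb__before_atomic() and
 * smp_mb__after_atomic() (again wired up by asm-generic/barrier.h) give
 * full ordering around non-value-returning RMW atomics.  On x86 those
 * atomics are lock prefixed and already full barriers, so only the
 * compiler needs restraining.  A hypothetical refcount-style use:
 *
 *	WRITE_ONCE(obj->done, 1);	// publish the result ...
 *	smp_mb__before_atomic();	// ... before dropping our reference
 *	atomic_dec(&obj->pending);
 */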

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */