#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
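
/*
 * Illustrative sketch, not part of the original header: the mandatory
 * barriers above matter even on UP when the memory being ordered is seen
 * by a device.  A hypothetical driver that fills a command block in
 * device-visible (e.g. write-combining) memory and then sets a "go" flag
 * in that same memory needs a real fence, not just a compiler barrier:
 *
 *	struct dev_cmd { u32 arg; u32 go; };	// hypothetical layout
 *
 *	static void kick_command(volatile struct dev_cmd *cmd, u32 arg)
 *	{
 *		cmd->arg = arg;
 *		wmb();		// argument must be visible before the go bit
 *		cmd->go  = 1;	// device polls this field
 *	}
 */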

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
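
/*
 * Illustrative sketch, not part of the original header, following the
 * descriptor-ring pattern dma_rmb()/dma_wmb() were introduced for (see
 * Documentation/memory-barriers.txt); desc, doorbell and DESC_NOTIFY are
 * hypothetical:
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// don't read data before the ownership check
 *		read_data = desc->data;		// consume what the device wrote
 *		desc->data = write_data;
 *		dma_wmb();			// flush the data before releasing the descriptor
 *		desc->status = DEVICE_OWN;
 *		wmb();				// order against the MMIO write below
 *		writel(DESC_NOTIFY, doorbell);	// poke the device
 *	}
 */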

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
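
/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer pairing the smp_*() variants exist for.  On SMP the
 * consumer's smp_rmb() pairs with the producer's smp_wmb(); on !SMP both
 * collapse to compiler barriers.  shared_data and data_ready are
 * hypothetical:
 *
 *	// CPU 0 (producer)
 *	shared_data = 42;
 *	smp_wmb();			// order the data before the flag
 *	WRITE_ONCE(data_ready, 1);
 *
 *	// CPU 1 (consumer)
 *	while (!READ_ONCE(data_ready))
 *		cpu_relax();
 *	smp_rmb();			// order the flag before the data
 *	val = shared_data;
 */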

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	ACCESS_ONCE(*p) = (v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	___p1;							\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	ACCESS_ONCE(*p) = (v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	___p1;							\
})

#endif
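
/*
 * Illustrative sketch, not part of the original header: smp_store_release()
 * publishes previously initialized data so that a reader whose
 * smp_load_acquire() observes the store also observes that initialization.
 * On regular x86 TSO both boil down to a compiler barrier plus a plain
 * access.  obj and published are hypothetical:
 *
 *	// writer
 *	obj->val = compute_val();
 *	smp_store_release(&published, 1);
 *
 *	// reader
 *	if (smp_load_acquire(&published))
 *		use(obj->val);		// guaranteed to see compute_val()'s result
 */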

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
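
/*
 * Illustrative sketch, not part of the original header: the __before/__after
 * helpers order accesses around value-less RMW atomics such as atomic_dec();
 * since x86 implements those with locked instructions, a compiler barrier is
 * all that is needed here.  obj is hypothetical:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	// order the store above before the RMW
 *	atomic_dec(&obj->ref_count);
 */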

/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
}
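
/*
 * Illustrative sketch, not part of the original header: bracketing a timed
 * region so that the TSC reads are not speculated into or out of it.
 * timed_work() is hypothetical:
 *
 *	cycles_t t0, t1;
 *
 *	rdtsc_barrier();
 *	t0 = get_cycles();
 *	timed_work();
 *	rdtsc_barrier();
 *	t1 = get_cycles();
 */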

#endif /* _ASM_X86_BARRIER_H */