#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
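
/*
 * Example (a sketch, not taken from this file): the mandatory barriers
 * above are what you want when publishing data to something other than
 * another well-behaved CPU, e.g. a device or agent polling a flag in
 * shared memory.  'buf' and its fields are made-up names.
 *
 *	buf->payload = payload;		// fill in the data
 *	wmb();				// order the data before the flag
 *	buf->ready = 1;			// device/other agent polls this
 *
 * and on the consuming side:
 *
 *	while (!buf->ready)
 *		cpu_relax();
 *	rmb();				// order the flag read before the data reads
 *	consume(buf->payload);
 */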

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 * 	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
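
/*
 * Example (a sketch of how the mask is meant to be used; the generic
 * array_index_nospec() helper in <linux/nospec.h> builds on roughly this
 * pattern).  'table' and 'nentries' are made-up names.
 *
 *	if (idx < nentries) {
 *		idx &= array_index_mask_nospec(idx, nentries);
 *		val = table[idx];	// a mispredicted idx is clamped to 0
 *	}
 */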

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
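
/*
 * Example (a sketch along the lines of the consistent-DMA descriptor
 * pattern in Documentation/memory-barriers.txt).  'desc' and DEVICE_OWN
 * are hypothetical.
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// own the descriptor before reading it
 *		read_data = desc->data;
 *		desc->data = write_data;
 *		dma_wmb();			// data visible before ownership flips
 *		desc->status = DEVICE_OWN;
 *	}
 */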
60
Michael S. Tsirkin1638fb72015-12-27 15:04:42 +020061#define __smp_mb() mb()
62#define __smp_rmb() dma_rmb()
63#define __smp_wmb() barrier()
64#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option, x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
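
/*
 * Example (a sketch): these __smp_*() definitions back the generic
 * smp_store_release()/smp_load_acquire() wrappers pulled in via
 * <asm-generic/barrier.h> below; on TSO they are just a compiler barrier
 * around a plain store/load.  'msg' and 'msg_ready' are made-up names.
 *
 *	// producer
 *	msg->data = 42;
 *	smp_store_release(&msg_ready, 1);	// publish after the data write
 *
 *	// consumer
 *	if (smp_load_acquire(&msg_ready))	// pairs with the release above
 *		consume(msg->data);		// guaranteed to see data == 42
 */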

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */