/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
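
/*
 * Illustration only, not part of this header: a minimal sketch of how
 * wmb() pairs with rmb() across two CPUs.  "data" and "flag" are
 * hypothetical variables, both initially zero.
 *
 *	CPU 0				CPU 1
 *	data = 42;			while (!flag)
 *	wmb();					cpu_relax();
 *	flag = 1;			rmb();
 *					r = data;	(r is guaranteed 42)
 *
 * The wmb() keeps the store to data visible before the store to flag;
 * the rmb() orders the load of flag before the load of data.
 */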

#define dma_rmb()	mb()
#define dma_wmb()	mb()
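
/*
 * Illustration only: dma_rmb()/dma_wmb() order CPU accesses to
 * DMA-coherent memory.  A hypothetical driver handing a descriptor to
 * its device might do:
 *
 *	desc->addr = buf_dma_addr;
 *	desc->len  = buf_len;
 *	dma_wmb();			(fill the fields first ...)
 *	desc->own  = DEVICE_OWNED;	(... then pass ownership)
 *
 * desc, buf_dma_addr and DEVICE_OWNED are made-up names for the
 * sketch; on ia64 both macros simply fall back to the full mf fence
 * via mb().
 */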

#define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery here!
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
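
/*
 * Illustration only: the generic smp_store_release()/smp_load_acquire()
 * wrappers that asm-generic/barrier.h builds from the __smp_* macros
 * above give one-way ordering for message passing.  "msg" and "ready"
 * are hypothetical variables, both initially zero.
 *
 *	CPU 0				CPU 1
 *	msg = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					r = msg;	(r is guaranteed 42)
 *
 * On ia64 the volatile access itself becomes st.rel/ld.acq, so only
 * the compiler barrier() above is needed on top.
 */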

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */