/*
 * Memory barrier definitions (s390 — the "bcr" serialization instruction
 * and the __ASM_BARRIER_H guard identify this as the s390 asm header).
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define mb() do {  asm volatile("bcr 14,0" : : : "memory"); } while (0)
#else
#define mb() do {  asm volatile("bcr 15,0" : : : "memory"); } while (0)
#endif

/*
 * On this architecture a full barrier suffices for read and write
 * ordering, so rmb()/wmb() and the SMP variants all map to mb().
 * read_barrier_depends() is a no-op (no Alpha-style dependency issue).
 */
#define rmb()				mb()
#define wmb()				mb()
#define read_barrier_depends()		do { } while(0)
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/* Assign, then issue a full barrier so the store is globally visible. */
#define set_mb(var, value)		do { var = value; mb(); } while (0)

/*
 * Store-release: a compiler barrier before the store is sufficient here;
 * compiletime_assert_atomic_type() rejects types that cannot be stored
 * atomically in a single access.
 */
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

/*
 * Load-acquire: read once, then a compiler barrier keeps subsequent
 * accesses from being hoisted above the load.
 */
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif /* __ASM_BARRIER_H */