/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define mb()	do { asm volatile("bcr 14,0" : : : "memory"); } while (0)
#else
#define mb()	do { asm volatile("bcr 15,0" : : : "memory"); } while (0)
#endif
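
/*
 * Illustrative sketch (assumed caller code, not part of this header):
 * mb() is typically placed between a data update and the store that
 * publishes it to another agent, e.g. a device polling an ownership
 * word in shared memory.  The descriptor layout, field names and the
 * DESC_OWNED_BY_HW flag below are hypothetical.
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	mb();			// data must be visible before the OWN bit
 *	desc->flags |= DESC_OWNED_BY_HW;
 */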

#define rmb()				mb()
#define wmb()				mb()
#define read_barrier_depends()		do { } while (0)
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#define smp_mb__before_atomic()		smp_mb()
#define smp_mb__after_atomic()		smp_mb()

#define set_mb(var, value)		do { var = value; mb(); } while (0)
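
/*
 * Illustrative sketch (assumed caller code, not part of this header):
 * smp_mb__before_atomic()/smp_mb__after_atomic() pair with atomic RMW
 * operations that do not themselves imply a barrier, such as set_bit().
 * On s390 both expand to a full smp_mb().  The object, field and bit
 * names below are hypothetical.
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic();	// order the store above before the bit op
 *	set_bit(WORK_PENDING, &obj->flags);
 */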

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
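
/*
 * Illustrative sketch (hypothetical producer/consumer, not part of this
 * header): smp_store_release() publishes data so that a reader using
 * smp_load_acquire() on the same variable also observes every store
 * made before the release.  The msg structure and its fields are made
 * up for the example.
 *
 * Producer:
 *	msg->payload = value;
 *	smp_store_release(&msg->ready, 1);
 *
 * Consumer:
 *	if (smp_load_acquire(&msg->ready))
 *		use(msg->payload);	// guaranteed to see the payload store
 */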

#endif /* __ASM_BARRIER_H */