/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

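/*
 * Both variants below expand to BCR (branch on condition) with
 * register 0 as the branch target, which acts as a serializing
 * no-op.  On z196 and newer machines, mask 14 requests
 * serialization without the slower checkpoint synchronization.
 */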
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif

#define mb() do {  asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

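/*
 * The strongly ordered s390 storage model already keeps ordinary
 * loads and stores in program order as seen by other CPUs, so rmb()
 * and wmb() only need to stop compiler reordering.  The dma_*mb()
 * variants keep the full serializing barrier for device access.
 */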
#define rmb()				barrier()
#define wmb()				barrier()
#define dma_rmb()			mb()
#define dma_wmb()			mb()
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()

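/* Data-dependent loads are never reordered here, so these compile away. */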
#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

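/* Fully order the accesses on either side of an atomic RMW operation. */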
#define smp_mb__before_atomic()		smp_mb()
#define smp_mb__after_atomic()		smp_mb()

#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); mb(); } while (0)

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
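
/*
 * Illustrative use of the acquire/release pair (a sketch, not part
 * of this header; the names data, ready, produce() and consume()
 * are hypothetical):
 *
 *	int data;
 *	int ready;
 *
 *	void produce(void)
 *	{
 *		data = 42;
 *		smp_store_release(&ready, 1);	// publish data
 *	}
 *
 *	int consume(void)
 *	{
 *		return smp_load_acquire(&ready) ? data : -1;
 *	}
 *
 * A consumer that observes ready != 0 is guaranteed to also observe
 * the store to data.  The compiletime_assert_atomic_type() check in
 * both macros rejects types that cannot be accessed in a single
 * load or store.
 */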

#endif /* __ASM_BARRIER_H */