blob: 10a508802940201e9919da07138033f1edcb6f78 [file] [log] [blame]
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
6
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
9
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
15
Heiko Carstensc6f48b02012-05-14 12:40:43 +020016static inline void mb(void)
17{
Heiko Carstense5b8d752012-05-14 12:41:54 +020018#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
19 /* Fast-BCR without checkpoint synchronization */
20 asm volatile("bcr 14,0" : : : "memory");
21#else
Heiko Carstensc6f48b02012-05-14 12:40:43 +020022 asm volatile("bcr 15,0" : : : "memory");
Heiko Carstense5b8d752012-05-14 12:41:54 +020023#endif
Heiko Carstensc6f48b02012-05-14 12:40:43 +020024}
David Howellsa0616cd2012-03-28 18:30:02 +010025
/*
 * On s390 every barrier flavour maps down to the single full barrier
 * mb() above; there is no weaker read-only or write-only instruction
 * used here, so rmb()/wmb() and the smp_* variants are all aliases.
 */
#define rmb() mb()
#define wmb() mb()
/* Data-dependent reads need no explicit barrier on this architecture. */
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
/* Bit-clear ordering helpers: full barrier on either side of the op. */
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

/* Store 'value' to 'var', then issue a full memory barrier. */
#define set_mb(var, value) do { var = value; mb(); } while (0)
David Howellsa0616cd2012-03-28 18:30:02 +010037
#endif /* __ASM_BARRIER_H */