#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__

#include <linux/preempt.h>
#include <asm/cmpxchg.h>

/*
 * s390 uses its own implementation for per cpu data: the offset of
 * the cpu-local data area is cached in the cpu's lowcore memory.
 */
#define __my_cpu_offset S390_lowcore.percpu_offset
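
/*
 * Illustration (not part of the original header): the generic code in
 * asm-generic/percpu.h builds its accessors on top of __my_cpu_offset,
 * so a per cpu dereference on s390 roughly becomes
 *
 *	__this_cpu_ptr(&var)  ~  (typeof(&var))((char *)&var +
 *				  S390_lowcore.percpu_offset)
 *
 * i.e. a single load from lowcore instead of the generic array lookup
 * indexed by the cpu number.
 */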

/*
 * For 64 bit module code the module may be more than 4G above the
 * per cpu area; use weak definitions to force the compiler to
 * generate external references.
 */
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif
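
/*
 * Rough sketch of the effect (the exact expansion lives in
 * include/linux/percpu-defs.h): with ARCH_NEEDS_WEAK_PER_CPU defined,
 * DEFINE_PER_CPU() emits the per cpu variable as a weak symbol, so
 * module code has to use GOT based external references instead of
 * assuming the symbol is reachable with a 32 bit relative offset.
 */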

#define arch_this_cpu_to_op(pcp, val, op)				\
({									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ old__, new__, prev__;				\
	pcp_op_T__ *ptr__;						\
	preempt_disable();						\
	ptr__ = __this_cpu_ptr(&(pcp));					\
	prev__ = *ptr__;						\
	do {								\
		old__ = prev__;						\
		new__ = old__ op (val);					\
		switch (sizeof(*ptr__)) {				\
		case 8:							\
			prev__ = cmpxchg64(ptr__, old__, new__);	\
			break;						\
		default:						\
			prev__ = cmpxchg(ptr__, old__, new__);		\
		}							\
	} while (prev__ != old__);					\
	preempt_enable();						\
	new__;								\
})
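
/*
 * Descriptive note, added for clarity: the loop above is a classic
 * compare-and-swap retry loop.  Preemption is disabled, so typically
 * only an interrupt on this cpu can modify *ptr__ between the read and
 * the cmpxchg(); if that happens, prev__ != old__ and the update is
 * retried.  The macro evaluates to new__, which is why it can back
 * both this_cpu_add_*() and this_cpu_add_return_*() below.
 */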

#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)

#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)

#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)

#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)

#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
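
/*
 * Usage sketch (hypothetical counter, for illustration only):
 *
 *	static DEFINE_PER_CPU(unsigned long, rx_count);
 *	...
 *	this_cpu_add(rx_count, 1);
 *	count = this_cpu_add_return(rx_count, 1);
 *
 * The generic this_cpu_*() wrappers in include/linux/percpu.h dispatch
 * on sizeof(rx_count) and end up in the size-suffixed macros above.
 */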

#define arch_this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ ret__;						\
	pcp_op_T__ *ptr__;						\
	preempt_disable();						\
	ptr__ = __this_cpu_ptr(&(pcp));					\
	switch (sizeof(*ptr__)) {					\
	case 8:								\
		ret__ = cmpxchg64(ptr__, oval, nval);			\
		break;							\
	default:							\
		ret__ = cmpxchg(ptr__, oval, nval);			\
	}								\
	preempt_enable();						\
	ret__;								\
})

#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
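
/*
 * Illustration (hypothetical per cpu state word, not part of this file):
 *
 *	do {
 *		old = this_cpu_read(state);
 *	} while (this_cpu_cmpxchg(state, old, old | SOME_FLAG) != old);
 *
 * The cmpxchg variants return the previous value, so a caller can tell
 * whether its update raced with an interrupt on the same cpu.
 */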

#define arch_this_cpu_xchg(pcp, nval)					\
({									\
	typeof(pcp) *ptr__;						\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	ptr__ = __this_cpu_ptr(&(pcp));					\
	ret__ = xchg(ptr__, nval);					\
	preempt_enable();						\
	ret__;								\
})

#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#ifdef CONFIG_64BIT
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#endif
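
/*
 * Descriptive note: xchg() on s390 (see asm/cmpxchg.h) is itself built
 * from a compare-and-swap loop, and its 8 byte variant needs the 64 bit
 * instruction set, hence the CONFIG_64BIT guard above.  A typical use is
 * an atomic read-and-clear, e.g. (hypothetical):
 *
 *	pending = this_cpu_xchg(irq_pending, 0);
 */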

#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2)	\
({									\
	typeof(pcp1) o1__ = (o1), n1__ = (n1);				\
	typeof(pcp2) o2__ = (o2), n2__ = (n2);				\
	typeof(pcp1) *p1__;						\
	typeof(pcp2) *p2__;						\
	int ret__;							\
	preempt_disable();						\
	p1__ = __this_cpu_ptr(&(pcp1));					\
	p2__ = __this_cpu_ptr(&(pcp2));					\
	ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__);	\
	preempt_enable();						\
	ret__;								\
})

#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
#ifdef CONFIG_64BIT
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
#endif
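
/*
 * Descriptive note: __cmpxchg_double() (from asm/cmpxchg.h) exchanges
 * two adjacent words in one atomic operation and returns a truth value
 * indicating whether the exchange happened.  The generic
 * this_cpu_cmpxchg_double() therefore expects pcp1 and pcp2 to be
 * adjacent, suitably aligned per cpu variables of the same size.
 */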

#include <asm-generic/percpu.h>

#endif /* __ARCH_S390_PERCPU__ */