#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
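
/*
 * Illustrative expansion (a sketch, not generated output): on 32-bit
 * SMP, PER_CPU(cpu_gdt_descr, %ebx) becomes roughly
 *
 *	movl %fs:this_cpu_off, %ebx
 *	lea cpu_gdt_descr(%ebx), %ebx
 *
 * i.e. this CPU's offset is fetched from this_cpu_off and added to the
 * variable's link-time address.
 */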

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
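
/*
 * Illustration (explanatory, not from the original source): on 64-bit
 * this collapses the pointer fixup into a single instruction such as
 *
 *	add %gs:this_cpu_off, %rax
 *
 * with the incoming pointer already in the output register, whereas
 * the generic version must first load the offset into a scratch
 * register and then add it.
 */
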
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)		\
do {						\
	typedef typeof(var) pto_T__;		\
	if (0) {				\
		pto_T__ pto_tmp__;		\
		pto_tmp__ = (val);		\
		(void)pto_tmp__;		\
	}					\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b %1,"__percpu_arg(0)	\
		    : "+m" (var)		\
		    : "qi" ((pto_T__)(val)));	\
		break;				\
	case 2:					\
		asm(op "w %1,"__percpu_arg(0)	\
		    : "+m" (var)		\
		    : "ri" ((pto_T__)(val)));	\
		break;				\
	case 4:					\
		asm(op "l %1,"__percpu_arg(0)	\
		    : "+m" (var)		\
		    : "ri" ((pto_T__)(val)));	\
		break;				\
	case 8:					\
		asm(op "q %1,"__percpu_arg(0)	\
		    : "+m" (var)		\
		    : "re" ((pto_T__)(val)));	\
		break;				\
	default: __bad_percpu_size();		\
	}					\
} while (0)

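/*
 * Illustration (a sketch, assuming a hypothetical DEFINE_PER_CPU(int, foo)):
 * percpu_to_op("mov", foo, 5) selects the 4-byte case and emits a single
 *
 *	movl $5, %gs:foo
 *
 * on 64-bit.  The dead "if (0)" branch exists only so the compiler
 * type-checks val against the variable's type.
 */
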
/*
 * Generate a percpu add-to-memory instruction, optimizing the code
 * to inc/dec when 1 is added or subtracted.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)

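/*
 * Illustration (a sketch, using a hypothetical 64-bit per-cpu counter):
 * this_cpu_add(nr_events, 1) reaches percpu_add_op() with a constant 1
 * and emits
 *
 *	incq %gs:nr_events
 *
 * rather than an addq with an immediate operand.
 */
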
#define percpu_from_op(op, var)			\
({						\
	typeof(var) pfo_ret__;			\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(1)",%0"	\
		    : "=q" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
	pfo_ret__;				\
})

#define percpu_stable_op(op, var)			\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(P1)",%0"	\
		    : "=q" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

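/*
 * Note (an observation, not from the original source): percpu_from_op()
 * passes the variable through an "m" constraint, so gcc performs the
 * load on every access, while percpu_stable_op() passes only the
 * address via "p", allowing gcc to reuse a previously loaded value.
 * See the this_cpu_read_stable() comment further down.
 */
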
#define percpu_unary_op(op, var)		\
({						\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)				\
({								\
	typeof(var) paro_ret__ = val;				\
	switch (sizeof(var)) {					\
	case 1:							\
		asm("xaddb %0, "__percpu_arg(1)			\
			    : "+q" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	case 2:							\
		asm("xaddw %0, "__percpu_arg(1)			\
			    : "+r" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	case 4:							\
		asm("xaddl %0, "__percpu_arg(1)			\
			    : "+r" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	case 8:							\
		asm("xaddq %0, "__percpu_arg(1)			\
			    : "+re" (paro_ret__), "+m" (var)	\
			    : : "memory");			\
		break;						\
	default: __bad_percpu_size();				\
	}							\
	paro_ret__ += val;					\
	paro_ret__;						\
})

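/*
 * Note (explanatory, not from the original source): xadd leaves the
 * *old* value of the memory operand in paro_ret__, so the trailing
 * "paro_ret__ += val" converts it into the post-add value that
 * this_cpu_add_return() and friends are defined to return.
 */
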
/*
 * xchg is implemented using cmpxchg without a lock prefix. A real
 * xchg is expensive due to its implied lock prefix, and the processor
 * cannot prefetch cachelines when xchg is used.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "q" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__)				\
			    : "memory");				\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})

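/*
 * Illustration (explanatory, not from the original source): for the
 * 4-byte case the generated sequence is
 *
 *	mov %gs:var, %eax	# sample the current value
 * 1:	cmpxchgl %new, %gs:var	# store iff it is still %eax
 *	jnz 1b			# retry if interrupt context changed it
 *
 * On failure cmpxchg reloads %eax with the current value, so the loop
 * almost always succeeds within a pass or two.
 */
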
/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu-local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)			\
({								\
	typeof(var) pco_ret__;					\
	typeof(var) pco_old__ = (oval);				\
	typeof(var) pco_new__ = (nval);				\
	switch (sizeof(var)) {					\
	case 1:							\
		asm("cmpxchgb %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "q" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 2:							\
		asm("cmpxchgw %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 4:							\
		asm("cmpxchgl %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 8:							\
		asm("cmpxchgq %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	default: __bad_percpu_size();				\
	}							\
	pco_ret__;						\
})

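/*
 * Usage sketch (hypothetical caller; the IDLE/BUSY states are made up
 * for illustration, this_cpu_cmpxchg() is the real generic wrapper):
 *
 *	DEFINE_PER_CPU(int, slot_state);
 *	...
 *	old = this_cpu_cmpxchg(slot_state, IDLE, BUSY);
 *	if (old == IDLE)
 *		... this CPU won the slot ...
 *
 * The caller detects success by comparing the returned old value with
 * the expected one.
 */
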
/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable(var)	percpu_stable_op("mov", var)

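/*
 * Sketch (assuming the real current_task per-cpu variable; the body
 * matches x86's get_current() in <asm/current.h>):
 *
 *	static __always_inline struct task_struct *get_current(void)
 *	{
 *		return this_cpu_read_stable(current_task);
 *	}
 *
 * Because current_task only changes at a context switch, gcc may hoist
 * the load and reuse the value for the rest of the function.
 */
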
#define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
		    : "b" (__n1), "c" (__n2), "a" (__o1));		\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */

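/*
 * Usage note (explanatory, not from the original source): the _double
 * variants update two adjacent per-cpu words atomically with respect
 * to this CPU; cmpxchg8b compares edx:eax against the 8-byte pair and,
 * on a match, stores ecx:ebx, with setz capturing success in __ret.
 */
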
/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction.  The
 * instruction is not supported on early AMD64 processors so we must be
 * able to emulate it in software.  The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

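/*
 * Note (explanatory, not from the original source): alternative_io()
 * patches this site at boot.  CPUs with X86_FEATURE_CX16 get the
 * inline cmpxchg16b; older AMD64 parts keep the call to
 * this_cpu_cmpxchg16b_emu, which emulates the compare-and-exchange
 * with interrupts disabled.
 */
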
#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})

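/*
 * Note (explanatory, not from the original source): btr copies the old
 * bit into CF and clears it in memory; "sbbl %0,%0" then expands CF to
 * 0 or -1, so old__ is nonzero exactly when the bit was previously set.
 */
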
static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline int x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))

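/*
 * Note (explanatory, not from the original source): for a
 * compile-time-constant nr the test folds into a plain mask against a
 * single per-cpu load, which gcc can optimize further; a runtime nr
 * falls back to a bt instruction on the per-cpu segment.
 */
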
#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

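/*
 * Usage sketch (illustrative; x86_cpu_to_apicid is a real user of
 * these macros, defined elsewhere in arch/x86):
 *
 *	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
 *	...
 *	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 *
 * Before the per-cpu areas are set up, the accessor reads the
 * __initdata early map; once the early pointer is cleared it reads the
 * real per-cpu variable.
 */
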
#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define	EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */