#ifndef _ARCH_X8664_LOCAL_H
#define _ARCH_X8664_LOCAL_H

#include <linux/percpu.h>

typedef struct
{
	volatile long counter;
} local_t;

#define LOCAL_INIT(i)	{ (i) }

#define local_read(v)	((v)->counter)
#define local_set(v,i)	(((v)->counter) = (i))

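/*
 * Example (illustrative only; the variable name is hypothetical): a
 * local_t is declared like any other counter and accessed through the
 * helpers above.
 *
 *	static local_t hits = LOCAL_INIT(0);
 *
 *	local_set(&hits, 0);
 *	long n = local_read(&hits);
 */
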
static inline void local_inc(local_t *v)
{
	__asm__ __volatile__(
		"incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
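
/*
 * Note on the asm constraints used by all four helpers: the "=m"
 * output together with the matching "m" input tells gcc that
 * v->counter is both read and written in place, without forcing the
 * value through a register.
 */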

static inline void local_dec(local_t *v)
{
	__asm__ __volatile__(
		"decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

static inline void local_add(long i, local_t *v)
{
	__asm__ __volatile__(
		"addq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

static inline void local_sub(long i, local_t *v)
{
	__asm__ __volatile__(
		"subq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

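/*
 * Usage sketch (struct and function names are hypothetical, not part
 * of this header): each helper above compiles to a single
 * read-modify-write instruction, so a counter that is only updated by
 * its owning CPU stays consistent even against interrupt handlers
 * running on that same CPU.
 *
 *	struct pkt_stats {
 *		local_t rx;
 *	};
 *
 *	static void count_rx(struct pkt_stats *s)
 *	{
 *		local_inc(&s->rx);
 *	}
 */
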
/* On x86-64 these are better than the atomic variants on SMP kernels
   because they don't use a lock prefix. */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i,l)	local_add((i),(l))
#define __local_sub(i,l)	local_sub((i),(l))

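/*
 * For comparison (illustrative): on an SMP kernel atomic_inc() emits a
 * lock-prefixed instruction, which locks the cache line for the whole
 * read-modify-write, while local_inc() above is a plain "incq".  The
 * lock prefix is only needed when several CPUs may modify the same
 * counter.
 */
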
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 *
 * This could be done better if we moved the per cpu data directly
 * after GS.
 */

/* Need to disable preemption for the cpu local counters, otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(v)		\
	({ local_t res__;		\
	   preempt_disable();		\
	   res__ = (v);			\
	   preempt_enable();		\
	   res__; })
#define cpu_local_wrap(v)		\
	({ preempt_disable();		\
	   v;				\
	   preempt_enable(); })

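/*
 * Illustration of the race the preempt_disable() pair prevents
 * (hypothetical, not code from this header):
 *
 *	ptr = &__get_cpu_var(v);	// runs on CPU 0
 *	// ...task preempted and migrated to CPU 1...
 *	local_inc(ptr);			// still updates CPU 0's counter
 */
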
#define cpu_local_read(v)	cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
#define cpu_local_set(v, i)	cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
#define cpu_local_inc(v)	cpu_local_wrap(local_inc(&__get_cpu_var(v)))
#define cpu_local_dec(v)	cpu_local_wrap(local_dec(&__get_cpu_var(v)))
#define cpu_local_add(i, v)	cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
#define cpu_local_sub(i, v)	cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
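
/*
 * Example (hypothetical variable name): per-cpu counters are declared
 * with DEFINE_PER_CPU() from <linux/percpu.h> and are passed to these
 * macros by name, not by address, as noted above.
 *
 *	static DEFINE_PER_CPU(local_t, nr_events);
 *
 *	cpu_local_inc(nr_events);
 *	cpu_local_add(16, nr_events);
 */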

#define __cpu_local_inc(v)	cpu_local_inc(v)
#define __cpu_local_dec(v)	cpu_local_dec(v)
#define __cpu_local_add(i, v)	cpu_local_add((i), (v))
#define __cpu_local_sub(i, v)	cpu_local_sub((i), (v))

#endif /* _ARCH_X8664_LOCAL_H */