#ifndef _ARCH_X8664_LOCAL_H
#define _ARCH_X8664_LOCAL_H

#include <linux/percpu.h>

/*
 * local_t: counter intended for CPU-local updates (see the local_inc()
 * family below).  "volatile" forces every access to go to memory; the
 * update helpers use single read-modify-write instructions without a
 * lock prefix, so they are not SMP-atomic -- per-CPU data only.
 */
typedef struct
{
	volatile long counter;
} local_t;
10
/* Static initializer, e.g.: static local_t hits = LOCAL_INIT(0); */
#define LOCAL_INIT(i)	{ (i) }

/* Plain (single load / single store) read and write of the counter. */
#define local_read(v)	((v)->counter)
#define local_set(v,i)	(((v)->counter) = (i))
15
/*
 * Increment the counter with a single "incq" and no lock prefix:
 * cheap, and being one instruction it cannot be split by an interrupt
 * on this CPU -- but it is NOT atomic across CPUs.  Use only on
 * per-CPU / interrupt-local data.
 *
 * The dummy "m" input tells gcc the old value is read as well as
 * written, so it cannot treat the location as write-only.
 */
static inline void local_inc(local_t *v)
{
	__asm__ __volatile__(
		"incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
23
/*
 * Decrement the counter with a single lock-prefix-free "decq".
 * Interrupt-safe on the local CPU (one instruction) but not atomic
 * across CPUs -- per-CPU / interrupt-local data only.
 *
 * The "m" input marks the location as read-modify-write for gcc.
 */
static inline void local_dec(local_t *v)
{
	__asm__ __volatile__(
		"decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
31
Andrew Morton2cf8d822006-03-31 02:30:49 -080032static inline void local_add(long i, local_t *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -070033{
34 __asm__ __volatile__(
Andi Kleen94949432006-03-25 16:31:37 +010035 "addq %1,%0"
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 :"=m" (v->counter)
37 :"ir" (i), "m" (v->counter));
38}
39
Andrew Morton2cf8d822006-03-31 02:30:49 -080040static inline void local_sub(long i, local_t *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -070041{
42 __asm__ __volatile__(
Andi Kleen94949432006-03-25 16:31:37 +010043 "subq %1,%0"
Linus Torvalds1da177e2005-04-16 15:20:36 -070044 :"=m" (v->counter)
45 :"ir" (i), "m" (v->counter));
46}
47
/* On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.  The __* forms (callers that
 * already guarantee no interrupt/preemption interference) can simply
 * alias the regular ones, since those are lock-free anyway. */
#define __local_inc(l) local_inc(l)
#define __local_dec(l) local_dec(l)
#define __local_add(i,l) local_add((i),(l))
#define __local_sub(i,l) local_sub((i),(l))
54
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations. Note they take
 * a variable, not an address.
 *
 * This could be done better if we moved the per cpu data directly
 * after GS.
 */
/* Each wrapper resolves this CPU's instance of the per-CPU variable
 * via __get_cpu_var() and applies the corresponding local_* op. */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))

/* __* variants alias the regular ones -- see the note above
 * __local_inc(): the ops are already lock-prefix free. */
#define __cpu_local_inc(v)	cpu_local_inc(v)
#define __cpu_local_dec(v)	cpu_local_dec(v)
#define __cpu_local_add(i, v)	cpu_local_add((i), (v))
#define __cpu_local_sub(i, v)	cpu_local_sub((i), (v))
73
#endif /* _ARCH_X8664_LOCAL_H */