blob: 1ac969724bb2fff929c397be1cbdcdf007a61953 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __LINUX_PERCPU_H
2#define __LINUX_PERCPU_H
Martin Peschke7ff6f082006-09-25 23:31:21 -07003
Robert P. J. Day0a3021f2007-07-15 23:39:57 -07004#include <linux/preempt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005#include <linux/slab.h> /* For kmalloc() */
6#include <linux/smp.h>
7#include <linux/string.h> /* For memset() */
Martin Peschke7ff6f082006-09-25 23:31:21 -07008#include <linux/cpumask.h>
9
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <asm/percpu.h>
11
#ifdef CONFIG_SMP
/*
 * Statically define a per-cpu variable.  The variable is placed in the
 * special .data.percpu section; at boot each CPU gets its own copy of
 * that section, and per_cpu()/​__get_cpu_var() locate the copy via a
 * per-cpu offset.  The "per_cpu__" name prefix keeps these symbols out
 * of the ordinary namespace.
 */
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/*
 * Like DEFINE_PER_CPU, but cacheline-aligned and grouped in a separate
 * subsection so frequently-written variables do not false-share a
 * cacheline with unrelated per-cpu data.
 */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(".data.percpu.shared_aligned"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
	____cacheline_aligned_in_smp
#else
/* On UP there is only one copy; no special section or alignment needed. */
#define DEFINE_PER_CPU(type, name) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)
#endif

/* Export the mangled per-cpu symbol name to modules. */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
31
/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
/* Extra bytes reserved for per-cpu variables defined by loadable modules. */
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

/*
 * __per_cpu_start/__per_cpu_end are linker-script symbols bracketing the
 * .data.percpu section, so the difference is the static per-cpu footprint.
 * Architectures may override this by defining PERCPU_ENOUGH_ROOM first.
 */
#define PERCPU_ENOUGH_ROOM \
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */
43
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
/*
 * get_cpu_var() disables preemption before taking the address of the
 * current CPU's copy, so the caller cannot migrate while using it;
 * put_cpu_var() re-enables preemption.  The extern declaration exists
 * only to reject non-identifier arguments at compile time (a GCC
 * statement expression evaluates to its last expression).
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
53
#ifdef CONFIG_SMP

/*
 * Bookkeeping for dynamically allocated per-cpu objects: one pointer
 * per possible CPU.  Declared as [1] and over-allocated to NR_CPUS
 * entries by the allocator (pre-dates flexible array members here).
 */
struct percpu_data {
	void *ptrs[1];
};

/*
 * The handle returned by the per-cpu allocator is the bitwise-NOT of
 * the real struct percpu_data pointer.  Any code that dereferences the
 * handle directly (instead of going through percpu_ptr()) faults
 * immediately, catching misuse.
 */
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
/*
 * Use this to get to a cpu's version of the per-cpu object dynamically
 * allocated. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 */
#define percpu_ptr(ptr, cpu)                              \
({                                                        \
        struct percpu_data *__p = __percpu_disguise(ptr); \
        (__typeof__(ptr))__p->ptrs[(cpu)];                \
})

/* Allocate/free the given CPU's copy within an existing per-cpu object. */
extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu);
extern void percpu_depopulate(void *__pdata, int cpu);
/* Same, over a cpumask; callers normally use the wrapper macros below. */
extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask);
extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask);
/* Allocate a per-cpu object with copies for every CPU in @mask. */
extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
#else /* CONFIG_SMP */

/*
 * UP: there is exactly one copy of every per-cpu object, so the handle
 * IS the data pointer (no disguise, no per-cpu indirection).  (cpu) is
 * evaluated and discarded to keep the macro side-effect compatible.
 */
#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

/* Populate/depopulate are no-ops on UP: the single copy always exists. */
static inline void percpu_depopulate(void *__pdata, int cpu)
{
}

static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
}

static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp,
				    int cpu)
{
	return percpu_ptr(__pdata, cpu);
}

static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
					 cpumask_t *mask)
{
	return 0;
}

/*
 * UP allocation degenerates to a plain zeroed kmalloc; @mask is ignored
 * since there is only one CPU.  __always_inline so the mask argument's
 * address is never materialized.
 */
static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
	return kzalloc(size, gfp);
}

static inline void percpu_free(void *__pdata)
{
	kfree(__pdata);
}

#endif /* CONFIG_SMP */
115
/*
 * Convenience wrappers: take a cpumask_t by value and pass its address
 * to the double-underscore implementations above.
 */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

/* Allocate copies only for CPUs that are currently online. */
#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */

/* Covers every possible CPU, so no hotplug callbacks are required. */
#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
						  cpu_possible_map)
/* Type-aware front end: returns a typed (disguised on SMP) handle. */
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
#define free_percpu(ptr)	percpu_free((ptr))
#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))

#endif /* __LINUX_PERCPU_H */