blob: cfda2d5ad319e75c8bf88b262d1b95bae8f3a2a6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __LINUX_PERCPU_H
2#define __LINUX_PERCPU_H
Martin Peschke7ff6f082006-09-25 23:31:21 -07003
Robert P. J. Day0a3021f2007-07-15 23:39:57 -07004#include <linux/preempt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005#include <linux/slab.h> /* For kmalloc() */
6#include <linux/smp.h>
Martin Peschke7ff6f082006-09-25 23:31:21 -07007#include <linux/cpumask.h>
Tejun Heo6a242902009-03-06 14:33:58 +09008#include <linux/pfn.h>
Martin Peschke7ff6f082006-09-25 23:31:21 -07009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <asm/percpu.h>
11
/*
 * Pick the linker section that holds per-cpu variables.  An
 * architecture may pre-define PER_CPU_BASE_SECTION (e.g. via
 * asm/percpu.h, included above); otherwise use a dedicated
 * .data.percpu section on SMP, or plain .data on UP where no
 * per-cpu duplication is needed.
 */
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif
19
/*
 * Sub-section suffixes appended to PER_CPU_BASE_SECTION by the
 * DEFINE_PER_CPU_*() variants below.  They collapse to "" wherever
 * the special placement is meaningless (modules, UP builds).
 */
#ifdef CONFIG_SMP

#ifdef MODULE
/* module percpu sections get no special sub-section */
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif
35
/*
 * DEFINE_PER_CPU_SECTION() is the workhorse: it emits a variable
 * named "per_cpu__<name>" into PER_CPU_BASE_SECTION plus an optional
 * sub-section suffix.  PER_CPU_ATTRIBUTES is an arch hook (from
 * asm/percpu.h) for extra qualifiers.
 */
#define DEFINE_PER_CPU_SECTION(type, name, section) \
	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/* plain per-cpu variable in the base per-cpu section */
#define DEFINE_PER_CPU(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, "")

/*
 * Cacheline-aligned variant: avoids false sharing between this
 * variable and its section neighbours on SMP.
 */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

/* page-aligned variant */
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

/* placed first in the per-cpu area (arch-special variables) */
#define DEFINE_PER_CPU_FIRST(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/* export the mangled per_cpu__<var> symbol to modules */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
55
/*
 * Extra per-cpu space reserved at boot for variables declared by
 * loadable modules; enough to cover all DEFINE_PER_CPUs in modules.
 */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

/*
 * Default size of the initial per-cpu area: the kernel image's own
 * per-cpu section (__per_cpu_start..__per_cpu_end), rounded up to a
 * cacheline, plus the module reserve.  An architecture may override.
 */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
Jeremy Fitzhardingeb00742d32007-05-02 19:27:11 +020068
/*
 * get_cpu_var() - disable preemption and evaluate to the current
 * CPU's instance of per-cpu variable @var, as an lvalue.
 *
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't: the dummy extern
 * declaration of simple_identifier_##var() is ill-formed for
 * anything but a plain name.
 *
 * Every get_cpu_var() must be paired with put_cpu_var(), which
 * re-enables preemption.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
78
#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32. More
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

/* base address of the first per-cpu chunk */
extern void *pcpu_base_addr;

/* callback returning the backing page for @pageno of @cpu's unit */
typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
/*
 * callback invoked for each address that needs its page table entry
 * populated during first-chunk setup (name implies PTE installation;
 * exact contract defined by the mm-side implementation)
 */
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

/* set up the first (static + reserved + dynamic) per-cpu chunk at boot */
extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size,
				void *base_addr,
				pcpu_populate_pte_fn_t populate_pte_fn);

/* alternate first-chunk setup helper; size parameters mirror the above */
extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size);

/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

/* allocate from the reserved portion of the first chunk (see reserved_size) */
extern void *__alloc_reserved_percpu(size_t size, size_t align);
Tejun Heofbf59bc2009-02-20 16:29:08 +0900127#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
128
/*
 * Legacy (non-dynamic) SMP allocator representation: a percpu
 * allocation is really an array of per-cpu pointers, one slot per
 * possible CPU.
 */
struct percpu_data {
	void *ptrs[1];
};

/*
 * The handle given out to users is the bitwise complement of the real
 * struct percpu_data address, so that direct dereference of the handle
 * faults instead of silently reading CPU 0's copy.  The expansion is
 * fully parenthesized: without the outer parentheses an expression
 * such as __percpu_disguise(p)->ptrs[0] would bind "->" to (pdata)
 * before the cast, since ->/[] bind tighter than a cast.
 */
#define __percpu_disguise(pdata) ((struct percpu_data *)(~(unsigned long)(pdata)))

/* recover the real struct and return @cpu's slot, typed like @ptr */
#define per_cpu_ptr(ptr, cpu)						\
({									\
	struct percpu_data *__p = __percpu_disguise(ptr);		\
	(__typeof__(ptr))__p->ptrs[(cpu)];				\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140
Tejun Heofbf59bc2009-02-20 16:29:08 +0900141#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
142
/* SMP allocator entry points (implementation lives outside this header) */
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

/*
 * UP: there is only one copy of each object, so the per-cpu pointer
 * is the pointer itself; @cpu is evaluated for side effects only.
 */
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
149
/*
 * UP fallback for __alloc_percpu(): no per-cpu duplication is needed,
 * so a plain zeroed kmalloc allocation suffices.  Note that @align is
 * only sanity-checked, not honoured beyond kmalloc's natural
 * alignment.
 */
static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc. WARN
	 * on it. Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}
160
/* UP counterpart of free_percpu(): the object came from kzalloc() above. */
static inline void free_percpu(void *p)
{
	kfree(p);
}
165
166#endif /* CONFIG_SMP */
167
/*
 * Type-safe front end: allocate one per-cpu object of @type at its
 * natural alignment and return it as (type *).  The expansion is
 * fully parenthesized so the result is safe in any expression
 * context -- e.g. alloc_percpu(int)[0] would otherwise subscript the
 * void * returned by __alloc_percpu() before the cast applies, since
 * [] binds tighter than a cast.
 */
#define alloc_percpu(type)						\
	((type *)__alloc_percpu(sizeof(type), __alignof__(type)))
170
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long. percpu_read() evaluates to a lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var(). Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 *
 * Each macro is wrapped in #ifndef so an architecture header included
 * earlier can supply its own optimized definition.
 */
#ifndef percpu_read
# define percpu_read(var)					\
  ({								\
	typeof(per_cpu_var(var)) __tmp_var__;			\
	__tmp_var__ = get_cpu_var(var);				\
	put_cpu_var(var);					\
	__tmp_var__;						\
  })
#endif

/*
 * Generic read-modify-write helper: applies "var op= val" on the
 * current CPU's copy with preemption disabled for the duration.
 */
#define __percpu_generic_to_op(var, val, op)				\
do {									\
	get_cpu_var(var) op val;					\
	put_cpu_var(var);						\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
222
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223#endif /* __LINUX_PERCPU_H */