blob: 8ff15153ae20cd05bbf280a3f5c0b785243cfc40 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __LINUX_PERCPU_H
2#define __LINUX_PERCPU_H
Martin Peschke7ff6f082006-09-25 23:31:21 -07003
Robert P. J. Day0a3021f2007-07-15 23:39:57 -07004#include <linux/preempt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07005#include <linux/slab.h> /* For kmalloc() */
6#include <linux/smp.h>
Martin Peschke7ff6f082006-09-25 23:31:21 -07007#include <linux/cpumask.h>
Tejun Heo6a242902009-03-06 14:33:58 +09008#include <linux/pfn.h>
Martin Peschke7ff6f082006-09-25 23:31:21 -07009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010#include <asm/percpu.h>
11
Brian Gerstd3770442009-02-08 09:58:38 -050012#ifndef PER_CPU_BASE_SECTION
travis@sgi.com5280e002008-01-30 13:32:52 +010013#ifdef CONFIG_SMP
Brian Gerst0bd74fa2009-01-19 12:21:27 +090014#define PER_CPU_BASE_SECTION ".data.percpu"
Brian Gerstd3770442009-02-08 09:58:38 -050015#else
16#define PER_CPU_BASE_SECTION ".data"
17#endif
18#endif
19
#ifdef CONFIG_SMP

/*
 * Sub-section suffixes appended to PER_CPU_BASE_SECTION.  Modules
 * cannot use the linker-provided shared_aligned section, so they fall
 * back to the base section (empty suffix).
 */
#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else	/* !CONFIG_SMP */

/* UP: no special placement needed, everything goes to the base section */
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif
35
/* Define a percpu variable placed in a specific percpu sub-section. */
#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

/* Plain percpu variable in the base percpu section. */
#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

/* Cacheline-aligned on SMP to avoid false sharing between CPUs. */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

/* Page-aligned variant for data that must start on a page boundary. */
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

/* Placed first in the percpu area (for arch-specific layout needs). */
#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
55
Tejun Heo6a242902009-03-06 14:33:58 +090056/* enough to cover all DEFINE_PER_CPUs in modules */
Jeremy Fitzhardingeb00742d32007-05-02 19:27:11 +020057#ifdef CONFIG_MODULES
Tejun Heo6a242902009-03-06 14:33:58 +090058#define PERCPU_MODULE_RESERVE (8 << 10)
Jeremy Fitzhardingeb00742d32007-05-02 19:27:11 +020059#else
Tejun Heo6a242902009-03-06 14:33:58 +090060#define PERCPU_MODULE_RESERVE 0
Linus Torvalds1da177e2005-04-16 15:20:36 -070061#endif
62
Tejun Heo6a242902009-03-06 14:33:58 +090063#ifndef PERCPU_ENOUGH_ROOM
Jeremy Fitzhardingeb00742d32007-05-02 19:27:11 +020064#define PERCPU_ENOUGH_ROOM \
Tejun Heo6a242902009-03-06 14:33:58 +090065 (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
66 PERCPU_MODULE_RESERVE)
67#endif
Jeremy Fitzhardingeb00742d32007-05-02 19:27:11 +020068
Jan Blunck632bbfe2006-09-25 23:30:53 -070069/*
70 * Must be an lvalue. Since @var must be a simple identifier,
71 * we force a syntax error here if it isn't.
72 */
73#define get_cpu_var(var) (*({ \
Jan Bluncka666ecf2006-10-06 00:43:58 -070074 extern int simple_identifier_##var(void); \
Jan Blunck632bbfe2006-09-25 23:30:53 -070075 preempt_disable(); \
76 &__get_cpu_var(var); }))
Linus Torvalds1da177e2005-04-16 15:20:36 -070077#define put_cpu_var(var) preempt_enable()
78
79#ifdef CONFIG_SMP
80
Tejun Heofbf59bc2009-02-20 16:29:08 +090081#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
82
Tejun Heo8d408b42009-02-24 11:57:21 +090083/* minimum unit size, also is the maximum supported allocation size */
Tejun Heo6a242902009-03-06 14:33:58 +090084#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
Tejun Heo8d408b42009-02-24 11:57:21 +090085
86/*
87 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
88 * back on the first chunk if arch is manually allocating and mapping
89 * it for faster access (as a part of large page mapping for example).
90 * Note that dynamic percpu allocator covers both static and dynamic
91 * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
92 *
93 * On typical configuration with modules, the following values leave
94 * about 8k of free space on the first chunk after boot on both x86_32
95 * and 64 when module support is enabled. When module support is
96 * disabled, it's much tighter.
97 */
98#ifndef PERCPU_DYNAMIC_RESERVE
99# if BITS_PER_LONG > 32
100# ifdef CONFIG_MODULES
Tejun Heo6a242902009-03-06 14:33:58 +0900101# define PERCPU_DYNAMIC_RESERVE (24 << 10)
Tejun Heo8d408b42009-02-24 11:57:21 +0900102# else
Tejun Heo6a242902009-03-06 14:33:58 +0900103# define PERCPU_DYNAMIC_RESERVE (16 << 10)
Tejun Heo8d408b42009-02-24 11:57:21 +0900104# endif
105# else
106# ifdef CONFIG_MODULES
Tejun Heo6a242902009-03-06 14:33:58 +0900107# define PERCPU_DYNAMIC_RESERVE (16 << 10)
Tejun Heo8d408b42009-02-24 11:57:21 +0900108# else
Tejun Heo6a242902009-03-06 14:33:58 +0900109# define PERCPU_DYNAMIC_RESERVE (8 << 10)
Tejun Heo8d408b42009-02-24 11:57:21 +0900110# endif
111# endif
112#endif /* PERCPU_DYNAMIC_RESERVE */
113
Tejun Heofbf59bc2009-02-20 16:29:08 +0900114extern void *pcpu_base_addr;
115
Tejun Heo8d408b42009-02-24 11:57:21 +0900116typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
Tejun Heofbf59bc2009-02-20 16:29:08 +0900117typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
118
Tejun Heo8d408b42009-02-24 11:57:21 +0900119extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
Tejun Heoedcb4632009-03-06 14:33:59 +0900120 size_t static_size, size_t reserved_size,
121 ssize_t unit_size, ssize_t dyn_size,
122 void *base_addr,
123 pcpu_populate_pte_fn_t populate_pte_fn);
Tejun Heo8d408b42009-02-24 11:57:21 +0900124
Tejun Heofbf59bc2009-02-20 16:29:08 +0900125/*
126 * Use this to get to a cpu's version of the per-cpu object
127 * dynamically allocated. Non-atomic access to the current CPU's
128 * version should probably be combined with get_cpu()/put_cpu().
129 */
130#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
131
Tejun Heoedcb4632009-03-06 14:33:59 +0900132extern void *__alloc_reserved_percpu(size_t size, size_t align);
133
Tejun Heofbf59bc2009-02-20 16:29:08 +0900134#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
135
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136struct percpu_data {
Eric Dumazetb3242152008-02-06 01:37:01 -0800137 void *ptrs[1];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138};
139
Martin Peschke7ff6f082006-09-25 23:31:21 -0700140#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
Rusty Russellb36128c2009-02-20 16:29:08 +0900142#define per_cpu_ptr(ptr, cpu) \
143({ \
144 struct percpu_data *__p = __percpu_disguise(ptr); \
145 (__typeof__(ptr))__p->ptrs[(cpu)]; \
146})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
Tejun Heofbf59bc2009-02-20 16:29:08 +0900148#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
149
Tejun Heof2a82052009-02-20 16:29:08 +0900150extern void *__alloc_percpu(size_t size, size_t align);
151extern void free_percpu(void *__pdata);
152
153#else /* CONFIG_SMP */
154
155#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
156
157static inline void *__alloc_percpu(size_t size, size_t align)
158{
159 /*
160 * Can't easily make larger alignment work with kmalloc. WARN
161 * on it. Larger alignment should only be used for module
162 * percpu sections on SMP for which this path isn't used.
163 */
Tejun Heoe3176032009-02-26 10:54:17 +0900164 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
Ingo Molnard2b02612009-02-25 14:36:45 +0100165 return kzalloc(size, GFP_KERNEL);
Tejun Heof2a82052009-02-20 16:29:08 +0900166}
167
168static inline void free_percpu(void *p)
169{
170 kfree(p);
171}
172
173#endif /* CONFIG_SMP */
174
175#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
176 __alignof__(type))
177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178#endif /* __LINUX_PERCPU_H */