#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>

#include <asm/percpu.h>

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
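
/*
 * Illustrative sketch of the static interface.  The variable name
 * "pkt_count" and the summing helper are hypothetical; per_cpu() is
 * provided via <asm/percpu.h> and for_each_possible_cpu() via
 * <linux/cpumask.h>.
 *
 *	DEFINE_PER_CPU(unsigned long, pkt_count);
 *	EXPORT_PER_CPU_SYMBOL(pkt_count);
 *
 *	static unsigned long pkt_count_total(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu(pkt_count, cpu);
 *		return sum;
 *	}
 */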

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

#define PERCPU_ENOUGH_ROOM						\
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */
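
/*
 * In the definition above, __per_cpu_start and __per_cpu_end are
 * linker-script symbols bounding the static per-cpu data section, so
 * PERCPU_ENOUGH_ROOM is the static footprint plus headroom reserved
 * for modules.
 */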

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
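
/*
 * Illustrative sketch, continuing the hypothetical "pkt_count" example
 * above: get_cpu_var() disables preemption while the current CPU's
 * copy is updated, and put_cpu_var() re-enables it.
 *
 *	get_cpu_var(pkt_count)++;
 *	put_cpu_var(pkt_count);
 */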

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size; also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggyback
 * on the first chunk if the arch is manually allocating and mapping it
 * for faster access (as part of a large page mapping, for example).
 * Note that the dynamic percpu allocator covers both static and dynamic
 * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
 *
 * On a typical configuration with modules, the following values leave
 * about 8k of free space on the first chunk after boot on both x86_32
 * and x86_64.  When module support is disabled, the margin is much
 * tighter.
 */
#ifndef PERCPU_DYNAMIC_RESERVE
#  if BITS_PER_LONG > 32
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
#    endif
#  else
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
#    endif
#  endif
#endif	/* PERCPU_DYNAMIC_RESERVE */
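
/*
 * A worked example of the defaults above, assuming 4KB pages
 * (PAGE_SHIFT == 12): the dynamic reserve is 24KB (6 << 12) on 64-bit
 * with modules, 16KB (4 << 12) on 64-bit without modules or on 32-bit
 * with modules, and 8KB (2 << 12) on 32-bit without modules.
 */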

extern void *pcpu_base_addr;

typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
					size_t static_size, size_t unit_size,
					size_t free_size, void *base_addr,
					pcpu_populate_pte_fn_t populate_pte_fn);

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

struct percpu_data {
	void *ptrs[1];
};

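/*
 * The handle returned by the legacy __alloc_percpu() path is the
 * bitwise complement of the real struct percpu_data pointer, and
 * per_cpu_ptr() below undoes the disguise.  This appears intended to
 * make the opaque handle fault if it is ever dereferenced directly
 * rather than through per_cpu_ptr().
 */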
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

#define per_cpu_ptr(ptr, cpu)						\
({									\
	struct percpu_data *__p = __percpu_disguise(ptr);		\
	(__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP, for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
						       __alignof__(type))
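
/*
 * Illustrative sketch of the allocation interface.  The struct and
 * variable names are hypothetical; get_cpu()/put_cpu() come from
 * <linux/smp.h>, as suggested by the per_cpu_ptr() comment above.
 *
 *	struct pkt_stats {
 *		unsigned long packets;
 *	};
 *
 *	struct pkt_stats *stats = alloc_percpu(struct pkt_stats);
 *
 *	if (stats) {
 *		int cpu = get_cpu();
 *
 *		per_cpu_ptr(stats, cpu)->packets++;
 *		put_cpu();
 *		free_percpu(stats);
 *	}
 */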

#endif /* __LINUX_PERCPU_H */