#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
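
/*
 * Rough usage sketch (not part of this header): accessing a statically
 * defined per-cpu variable with preemption disabled.  The variable
 * name "example_counter" is hypothetical.
 *
 *	DEFINE_PER_CPU(int, example_counter);
 *
 *	void example_bump(void)
 *	{
 *		get_cpu_var(example_counter)++;	// preemption disabled here
 *		put_cpu_var(example_counter);	// preemption re-enabled
 *	}
 */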

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, which is also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggyback
 * on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and x86_32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;

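/*
 * Callbacks used when setting up the first chunk: the get_page
 * callback is expected to return the page backing @pageno of @cpu's
 * per-cpu unit (or NULL if it doesn't exist), and the populate_pte
 * callback is expected to make sure the kernel page table can map
 * @addr.
 */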
typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size,
				void *base_addr,
				pcpu_populate_pte_fn_t populate_pte_fn);

extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size);

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
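
/*
 * Hypothetical sketch of the access pattern described above (the
 * "stats" pointer and struct are illustrative only): pin the current
 * CPU with get_cpu() while touching its instance via per_cpu_ptr().
 *
 *	struct example_stats { unsigned long hits; };
 *	struct example_stats *stats;	// from alloc_percpu() below
 *
 *	void example_hit(void)
 *	{
 *		int cpu = get_cpu();		// disables preemption
 *		per_cpu_ptr(stats, cpu)->hits++;
 *		put_cpu();			// re-enables preemption
 *	}
 */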

extern void *__alloc_reserved_percpu(size_t size, size_t align);

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

struct percpu_data {
	void *ptrs[1];
};
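
/*
 * In this legacy implementation each per-cpu object is a separate
 * allocation; ptrs[] is indexed by cpu number and sized for the
 * possible cpus at allocation time.  The pointer handed back by
 * __alloc_percpu() is a disguised pointer to this bookkeeping
 * structure, so it must only be dereferenced through per_cpu_ptr().
 */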

/* pointer disguising messes up kmemleak object tracking */
#ifndef CONFIG_DEBUG_KMEMLEAK
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
#else
#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
#endif

#define per_cpu_ptr(ptr, cpu)						\
({									\
	struct percpu_data *__p = __percpu_disguise(ptr);		\
	(__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
						       __alignof__(type))
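
/*
 * Illustrative lifecycle of a dynamically allocated per-cpu object;
 * the names are made up for this sketch and are not part of the API:
 *
 *	struct example_stats { unsigned long hits; };
 *	struct example_stats *stats;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct example_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(stats, cpu)->hits = 0;
 *	...
 *	free_percpu(stats);
 */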

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of char, int or long.  percpu_read() evaluates to an
 * lvalue and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(per_cpu_var(var)) __tmp_var__;				\
	__tmp_var__ = get_cpu_var(var);					\
	put_cpu_var(var);						\
	__tmp_var__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	get_cpu_var(var) op val;					\
	put_cpu_var(var);						\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
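
/*
 * Rough usage sketch for the accessors above (the variable name is
 * hypothetical): these take the variable itself, not a pointer, and
 * stay preemption-safe without an explicit get/put_cpu_var() pair.
 *
 *	DEFINE_PER_CPU(unsigned long, example_events);
 *
 *	percpu_write(example_events, 0);
 *	percpu_add(example_events, 16);
 *	n = percpu_read(example_events);
 */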

#endif /* __LINUX_PERCPU_H */