#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of the percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

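/*
 * Illustrative sketch, not part of the original header: a typical
 * get_cpu_var()/put_cpu_var() pairing.  Preemption is disabled between
 * the two calls, so the RMW on the local copy cannot migrate CPUs.
 * The variable name is made up for the example.
 *
 *	DEFINE_PER_CPU(int, my_hits);
 *
 *	static void count_hit(void)
 *	{
 *		get_cpu_var(my_hits)++;
 *		put_cpu_var(my_hits);
 *	}
 */
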
#ifdef CONFIG_SMP

/* minimum unit size, also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch
 * is manually allocating and mapping it for faster access (as a part
 * of a large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

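/*
 * Illustrative sketch, not part of the original header: roughly how an
 * arch's setup_per_cpu_areas() might bootstrap the first chunk with the
 * embed helper.  The bootmem-based callbacks and the error handling are
 * simplified assumptions, not a reference implementation.
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return __alloc_bootmem_nopanic(size, align,
 *					       __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		free_bootmem(__pa(ptr), size);
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		if (pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *					   NULL, pcpu_fc_alloc,
 *					   pcpu_fc_free) < 0)
 *			panic("cannot initialize percpu area");
 *	}
 */
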
/*
 * Use this to get at a given cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

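/*
 * Illustrative sketch, not part of the original header: summing a
 * dynamically allocated per-cpu counter over all possible cpus, then
 * touching the local copy under get_cpu()/put_cpu().  The counter and
 * its name are hypothetical.
 *
 *	int __percpu *counters = alloc_percpu(int);
 *	int cpu, total = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(counters, cpu);
 *
 *	cpu = get_cpu();
 *	(*per_cpu_ptr(counters, cpu))++;
 *	put_cpu();
 */
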
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void __percpu *p)
{
	kfree(p);
}

static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	return __pa(addr);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))

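/*
 * Illustrative sketch, not part of the original header: allocating and
 * freeing a per-cpu structure with alloc_percpu()/free_percpu().  The
 * struct and its fields are made up for the example.
 *
 *	struct my_stats {
 *		unsigned long	rx;
 *		unsigned long	tx;
 *	};
 *
 *	struct my_stats __percpu *stats = alloc_percpu(struct my_stats);
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */
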
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of char, int or long.  percpu_read() evaluates to the
 * value of @var and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)					\
  ({								\
	typeof(var) *pr_ptr__ = &(var);				\
	typeof(var) pr_ret__;					\
	pr_ret__ = get_cpu_var(*pr_ptr__);			\
	put_cpu_var(*pr_ptr__);					\
	pr_ret__;						\
  })
#endif

#define __percpu_generic_to_op(var, val, op)			\
do {								\
	typeof(var) *pgto_ptr__ = &(var);			\
	get_cpu_var(*pgto_ptr__) op val;			\
	put_cpu_var(*pgto_ptr__);				\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif

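/*
 * Illustrative sketch, not part of the original header: the percpu_*()
 * accessors applied to a hypothetical static per-cpu variable.
 *
 *	DEFINE_PER_CPU(unsigned long, my_flags);
 *
 *	percpu_write(my_flags, 0);
 *	percpu_or(my_flags, 1UL << 3);
 *	if (percpu_read(my_flags) & (1UL << 3))
 *		percpu_add(my_flags, 16);
 */
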
/*
 * Branch on the scalar size of the object handled: the macros below
 * dispatch to a set of size-specific functions (stem##1, stem##2,
 * stem##4 or stem##8) according to sizeof(variable).
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)			\
({	typeof(variable) pscr_ret__;				\
	__verify_pcpu_ptr(&(variable));				\
	switch(sizeof(variable)) {				\
	case 1: pscr_ret__ = stem##1(variable);break;		\
	case 2: pscr_ret__ = stem##2(variable);break;		\
	case 4: pscr_ret__ = stem##4(variable);break;		\
	case 8: pscr_ret__ = stem##8(variable);break;		\
	default:						\
		__bad_size_call_parameter();break;		\
	}							\
	pscr_ret__;						\
})

#define __pcpu_size_call(stem, variable, ...)			\
do {								\
	__verify_pcpu_ptr(&(variable));				\
	switch(sizeof(variable)) {				\
		case 1: stem##1(variable, __VA_ARGS__);break;	\
		case 2: stem##2(variable, __VA_ARGS__);break;	\
		case 4: stem##4(variable, __VA_ARGS__);break;	\
		case 8: stem##8(variable, __VA_ARGS__);break;	\
		default: 					\
			__bad_size_call_parameter();break;	\
	}							\
} while (0)

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely, e.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes, e.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If the arch code does not provide operations
 *    for a scalar size then the fallback in the generic code will be
 *    used.
 */

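/*
 * Illustrative sketch, not part of the original header: how an arch
 * header might take option 2 above and supply a size-specific override
 * before including this file.  The x86-style segment-prefixed asm is a
 * simplified assumption, not a reference implementation.
 *
 *	#define this_cpu_add_4(pcp, val)			\
 *		asm("addl %1, " __percpu_arg(0)			\
 *		    : "+m" (pcp) : "ri" ((u32)(val)))
 *
 * Any size (1, 2, 4 or 8) the arch leaves undefined falls back to the
 * generic preempt_disable()-based version below.
 */
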
#define _this_cpu_generic_read(pcp)				\
({	typeof(pcp) ret__;					\
	preempt_disable();					\
	ret__ = *this_cpu_ptr(&(pcp));				\
	preempt_enable();					\
	ret__;							\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)			\
do {								\
	preempt_disable();					\
	*__this_cpu_ptr(&(pcp)) op val;				\
	preempt_enable();					\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

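/*
 * Illustrative sketch, not part of the original header: bumping a
 * hypothetical per-cpu event counter from preemptible context.  No
 * explicit preempt_disable() is needed; this_cpu_inc() makes the RMW
 * itself preemption safe.
 *
 *	DEFINE_PER_CPU(unsigned long, my_events);
 *
 *	static void note_event(void)
 *	{
 *		this_cpu_inc(my_events);
 *	}
 */
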
#ifndef this_cpu_and
# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
# define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
# define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
# define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
# define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
# define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
# define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
# define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
# define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)			\
do {								\
	*__this_cpu_ptr(&(pcp)) op val;				\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
# define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
# define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
# define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
# define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
# define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
# define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
# define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
# define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

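/*
 * Illustrative sketch, not part of the original header: the __this_cpu
 * variants are only safe when preemption is already excluded by the
 * caller.  The counter name is hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, my_count);
 *
 *	static void bump_twice(void)
 *	{
 *		preempt_disable();
 *		__this_cpu_inc(my_count);
 *		__this_cpu_inc(my_count);
 *		preempt_enable();
 *	}
 */
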
#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
# define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
# define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
# define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
# define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
# define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
# define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
# define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
# define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
# define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
# define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
# define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
# define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)			\
do {								\
	unsigned long flags;					\
	local_irq_save(flags);					\
	*__this_cpu_ptr(&(pcp)) op val;				\
	local_irq_restore(flags);				\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif

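/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * per-cpu counter updated from both an interrupt handler (where IRQs
 * are already excluded, so __this_cpu_inc() suffices) and process
 * context (where irqsafe_cpu_inc() keeps the RMW atomic against the
 * local IRQ).
 *
 *	DEFINE_PER_CPU(unsigned long, my_irq_count);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev)
 *	{
 *		__this_cpu_inc(my_irq_count);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void account_from_task(void)
 *	{
 *		irqsafe_cpu_inc(my_irq_count);
 *	}
 */
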
#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */