#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
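
/*
 * Minimal usage sketch (illustrative only; "my_counter" is a
 * hypothetical per-cpu variable, not one defined in this header):
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	get_cpu_var(my_counter)++;	- preemption disabled across access
 *	put_cpu_var(my_counter);	- preemption enabled again
 */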

#ifdef CONFIG_SMP

/* minimum unit size; also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggyback
 * on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;	/* size of the static percpu area */
	size_t			reserved_size;	/* reserved area, e.g. for modules */
	size_t			dyn_size;	/* room for dynamic allocation */
	size_t			unit_size;	/* size of each per-cpu unit */
	size_t			atom_size;	/* allocation atom size */
	size_t			alloc_size;	/* size of each allocation */
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
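
/*
 * A sketch of how an arch might wire up the first chunk with the embed
 * helper.  Illustrative only: the callback names are made up and real
 * arch code differs in details (see the arch setup_per_cpu_areas()
 * implementations).
 *
 *	static void * __init my_pcpu_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init my_pcpu_free(void *ptr, size_t size)
 *	{
 *		free_bootmem(__pa(ptr), size);
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		if (pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *					   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *					   NULL, my_pcpu_alloc,
 *					   my_pcpu_free) < 0)
 *			panic("failed to initialize percpu areas");
 *	}
 */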

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
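
/*
 * For example, summing a dynamically allocated per-cpu counter over
 * all cpus (illustrative; "pdata" is assumed to be an unsigned long
 * pointer returned by __alloc_percpu(), declared below):
 *
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(pdata, cpu);
 */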

extern void *__alloc_reserved_percpu(size_t size, size_t align);
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	return __pa(addr);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	\
	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
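
/*
 * Typed allocation example (illustrative; "struct my_stats" is a
 * hypothetical type):
 *
 *	struct my_stats *stats = alloc_percpu(struct my_stats);
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);
 */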

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of a char, int or long.  percpu_read() evaluates to the
 * variable's value and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(per_cpu_var(var)) __tmp_var__;				\
	__tmp_var__ = get_cpu_var(var);					\
	put_cpu_var(var);						\
	__tmp_var__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	get_cpu_var(var) op val;					\
	put_cpu_var(var);						\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
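
/*
 * Example (illustrative; "nr_events" is a hypothetical per-cpu
 * variable declared with DEFINE_PER_CPU):
 *
 *	unsigned long snapshot = percpu_read(nr_events);
 *
 *	percpu_add(nr_events, 3);
 *	percpu_write(nr_events, 0);
 */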

/*
 * Branching macros that split an operation into a set of functions
 * called for the different scalar sizes of the objects handled.
 * __bad_size_call_parameter() is intentionally never defined: a call
 * with an unsupported size survives dead-code elimination and fails
 * at link time.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__);break;			\
	case 2: stem##2(variable, __VA_ARGS__);break;			\
	case 4: stem##4(variable, __VA_ARGS__);break;			\
	case 8: stem##8(variable, __VA_ARGS__);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
} while (0)
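
/*
 * For a 4-byte variable,
 *
 *	__pcpu_size_call(this_cpu_add_, myvar, 1)
 *
 * compiles down to just this_cpu_add_4(myvar, 1); the other switch
 * arms are constant-folded away, so __bad_size_call_parameter() is
 * never referenced for supported sizes.
 */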

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables (can be determined
 * using per_cpu_var(xx)).
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely, e.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes, e.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
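
/*
 * A sketch of option 2, loosely modeled on x86's segment-prefixed
 * per cpu instructions (illustrative only, not the actual arch code):
 *
 *	#define this_cpu_add_4(pcp, val)				\
 *		asm("addl %1," __percpu_arg(0)				\
 *		    : "+m" (pcp) : "ri" (val))
 *
 * A single instruction needs no preempt protection: even if the task
 * migrates, the add lands entirely on one cpu's copy.
 */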

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
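
/*
 * Example contrasting the two flavors ("nr_calls" is a hypothetical
 * per-cpu variable):
 *
 *	this_cpu_inc(nr_calls);		- safe in preemptible context
 *
 *	preempt_disable();
 *	__this_cpu_inc(nr_calls);	- caller already holds off preemption
 *	preempt_enable();
 */
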
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)
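
/*
 * Example (illustrative; "rx_bytes" is a hypothetical per-cpu counter
 * that an interrupt handler on the same cpu also updates):
 *
 *	irqsafe_cpu_add(rx_bytes, len);
 */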

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */