#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)

#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
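
/*
 * Example (illustrative sketch, not part of this header): bumping a
 * statically defined per-cpu counter in a preemption-safe way.  The
 * variable and function names below are hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, example_count);
 *
 *	static void example_bump(void)
 *	{
 *		get_cpu_var(example_count)++;	// disables preemption
 *		put_cpu_var(example_count);	// re-enables preemption
 *	}
 *
 * get_cpu_ptr()/put_cpu_ptr() work the same way but take a percpu
 * pointer (e.g. one returned by alloc_percpu()) instead of a variable.
 */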

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif
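
/*
 * Example (illustrative sketch, not from this header): an arch that
 * selects CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK might set up the first
 * chunk roughly like this.  The callback names are hypothetical and
 * the bootmem allocator details are an assumption.
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		free_bootmem(__pa(ptr), size);
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		int rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *						PERCPU_DYNAMIC_RESERVE,
 *						PAGE_SIZE, NULL,
 *						pcpu_fc_alloc, pcpu_fc_free);
 *		if (rc < 0)
 *			panic("cannot initialize percpu area (err=%d)", rc);
 *	}
 */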

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif
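
/*
 * Example (illustrative sketch): summing a dynamically allocated
 * per-cpu counter across all possible CPUs.  The names below are
 * hypothetical.
 *
 *	unsigned long example_total(unsigned long __percpu *counter)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(counter, cpu);
 *		return sum;
 *	}
 */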

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
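
/*
 * Example (illustrative sketch): allocating, using and freeing a
 * per-cpu structure.  "struct example_stats" and its fields are
 * hypothetical.
 *
 *	struct example_stats {
 *		unsigned long packets;
 *		unsigned long bytes;
 *	};
 *
 *	struct example_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct example_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	this_cpu_inc(stats->packets);	// see this_cpu_*() below
 *	...
 *	free_percpu(stats);
 */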

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of a char, int or long.  percpu_read() evaluates to an
 * lvalue and all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
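
/*
 * Example (illustrative sketch): reading and updating a per-cpu
 * variable through these accessors.  "example_state" is hypothetical.
 *
 *	DEFINE_PER_CPU(int, example_state);
 *
 *	int v = percpu_read(example_state);	// preemption-safe read
 *	percpu_add(example_state, 2);		// preemption-safe RMW
 */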
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif

/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption-safe way since we know that the context is not preemption
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely.  E.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes.  E.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
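
/*
 * Example (illustrative sketch): this_cpu ops on the hypothetical
 * per-cpu structure from the alloc_percpu() example above.  Each
 * operation is a single preemption-safe RMW on the current CPU's copy.
 *
 *	this_cpu_inc(stats->packets);
 *	this_cpu_add(stats->bytes, len);
 */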

#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
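
/*
 * Example (illustrative sketch): generating a per-cpu sequence number.
 * The variable name is hypothetical; the increment and the read back
 * happen atomically w.r.t. preemption.
 *
 *	DEFINE_PER_CPU(unsigned long, example_seq);
 *
 *	unsigned long next_seq = this_cpu_inc_return(example_seq);
 */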

#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
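
/*
 * Example (illustrative sketch): a cmpxchg retry loop that records a
 * per-cpu maximum.  The names are hypothetical; success is indicated
 * by the returned old value matching the expected one.
 *
 *	DEFINE_PER_CPU(unsigned long, example_max);
 *
 *	void example_update_max(unsigned long val)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = this_cpu_read(example_max);
 *			if (old >= val)
 *				return;
 *		} while (this_cpu_cmpxchg(example_max, old, val) != old);
 *	}
 */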

/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
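
/*
 * Example (illustrative sketch): updating an adjacent pointer/counter
 * pair in one shot.  The struct and all names are hypothetical; the
 * pair must be double-word aligned with the second member directly
 * after the first, as described above.
 *
 *	struct example_pair {
 *		void *obj;
 *		unsigned long generation;
 *	} __aligned(2 * sizeof(void *));
 *
 *	DEFINE_PER_CPU(struct example_pair, example_pair);
 *
 *	bool ok = this_cpu_cmpxchg_double(example_pair.obj,
 *					  example_pair.generation,
 *					  old_obj, old_gen,
 *					  new_obj, new_gen);
 */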
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	preempt_disable();						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_,	\
			(pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
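
/*
 * Example (illustrative sketch): __this_cpu ops inside a section that
 * has already disabled preemption, so the cheaper unprotected forms
 * are safe.  The variable name is hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, example_events);
 *
 *	preempt_disable();
 *	__this_cpu_inc(example_events);
 *	__this_cpu_add(example_events, 2);
 *	preempt_enable();
 */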
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

#define __this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
	    __this_cpu_read(pcp2) == (oval2)) {				\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_,	\
			(pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations.  Note that these
 * operations are *not* safe against modification of the same variable
 * from another processor (which one gets when using regular atomic
 * operations).  They are guaranteed to be atomic vs. local interrupts
 * and preemption only.
 */
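
/*
 * Example (illustrative sketch): a counter updated from both process
 * and hardirq context on the same CPU.  The name is hypothetical.
 *
 *	DEFINE_PER_CPU(unsigned long, example_irq_count);
 *
 *	// process context and irq handlers can both do:
 *	irqsafe_cpu_inc(example_irq_count);
 */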
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg
# ifndef irqsafe_cpu_cmpxchg_1
#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_2
#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_4
#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_8
#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define irqsafe_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif

#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg_double
# ifndef irqsafe_cpu_cmpxchg_double_1
#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_2
#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_4
#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_8
#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_,	\
			(pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

#endif /* __LINUX_PERCPU_H */