/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/futex.h>
#include <arch/chip.h>

/* See <asm/atomic_32.h> */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/*
 * A block of memory containing locks for atomic ops. Each instance of this
 * struct will be homed on a different CPU.
 */
struct atomic_locks_on_cpu {
	int lock[ATOMIC_HASH_L2_SIZE];
} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));

static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);

/* The locks we'll use until __init_atomic_per_cpu is called. */
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;

/* Hash into this vector to get a pointer to lock for the given atomic. */
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
	__write_once = {
	[0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks)
};

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
	__attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

static inline int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	unsigned long i =
		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
	unsigned long n = __insn_crc32_32(0, i);

	/* Grab high bits for L1 index. */
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	/* Grab low bits for L2 index. */
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

	return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
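	/*
	 * A rough plain-C sketch of the calculation below, assuming the
	 * mm instruction merges the selected bit field of its first
	 * operand into its second operand:
	 *
	 *	offset = ((unsigned long)v >> 1) &
	 *		 (((1 << ATOMIC_HASH_SHIFT) - 1) << 2);
	 *	return (int *)((unsigned long)atomic_locks + offset);
	 */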
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
#endif
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	int i;
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {

		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
			return 1;
		}
	}
	return 0;
#else
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}

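/*
 * Unlock the given atomic lock word after a fault occurs partway
 * through an atomic sequence; the interrupted sequence is expected to
 * still hold the lock (hence the checks below).
 */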
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

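/*
 * The __atomic_*() helpers used below are implemented in assembly; each
 * returns a struct __get_user whose .val field holds the value that was
 * originally read from memory, and whose .err field reports any fault
 * (only the futex paths below care about .err).
 */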
int _atomic_xchg(atomic_t *v, int n)
{
	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
		.val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);


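/*
 * The 64-bit variants take the same hashed lock as the 32-bit ones;
 * their assembly helpers return the previous 64-bit value directly
 * rather than a struct.
 */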
u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
					  u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);


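/*
 * The futex_*() routines below mirror the kernel atomics above but
 * operate on user-space addresses, hashing into the same lock table.
 */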
static inline int *__futex_setup(int __user *v)
{
	/*
	 * Issue a prefetch to the counter to bring it into cache.
	 * As for __atomic_setup, but we can't do a read into the L1
	 * since it might fault; instead we do a prefetch into the L2.
	 */
	__insn_prefetch(v);
	return __atomic_hashed_lock((int __force *)v);
}

struct __get_user futex_set(int __user *v, int i)
{
	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
}

struct __get_user futex_add(int __user *v, int n)
{
	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_or(int __user *v, int n)
{
	return __atomic_or((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_andn(int __user *v, int n)
{
	return __atomic_andn((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_xor(int __user *v, int n)
{
	return __atomic_xor((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_cmpxchg(int __user *v, int o, int n)
{
	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
}

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}


#if CHIP_HAS_CBOX_HOME_MAP()
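/* "noatomichash" is still accepted on the command line but no longer has any effect. */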
static int __init noatomichash(char *str)
{
	pr_warning("noatomichash is deprecated.\n");
	return 1;
}
__setup("noatomichash", noatomichash);
#endif

void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

	unsigned int i;
	int actual_cpu;

	/*
	 * Before this is called from setup, we just have one lock for
	 * all atomic objects/operations.  Here we replace the
	 * elements of atomic_lock_ptr so that they point at per_cpu
	 * integers.  This seemingly over-complex approach stems from
	 * the fact that DEFINE_PER_CPU defines an entry for each cpu
	 * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1.  But
	 * for efficient hashing of atomics to their locks we want a
	 * compile time constant power of 2 for the size of this
	 * table, so we use ATOMIC_HASH_SIZE.
	 *
	 * Here we populate atomic_lock_ptr from the per cpu
	 * atomic_lock_pool, interspersing by actual cpu so that
	 * subsequent elements are homed on consecutive cpus.
	 */
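	/*
	 * For example, assuming cpus 0-3 are all possible, the loop below
	 * fills entries 0, 1, 2, ... with the pools of cpus 1, 2, 3, 0,
	 * 1, ... since actual_cpu is advanced before each assignment.
	 */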

	actual_cpu = cpumask_first(cpu_possible_mask);

	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		/*
		 * Preincrement to slightly bias against using cpu 0,
		 * which has plenty of stuff homed on it already.
		 */
		actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
		if (actual_cpu >= nr_cpu_ids)
			actual_cpu = cpumask_first(cpu_possible_mask);

		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
	}

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumption */
	BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* The futex code makes this assumption, so we validate it here. */
	BUG_ON(sizeof(atomic_t) != sizeof(int));
}