/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
        /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
        /*
         * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
         * Using mm works here because atomic_locks is page aligned.
         */
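        /*
         * (That is, the masked-merge takes bits [2, ATOMIC_HASH_SHIFT + 1]
         * of (v >> 1) -- the lock-index bits of v -- and merges them into
         * the page-aligned atomic_locks base, yielding &atomic_locks[hash]
         * in a single instruction.)
         */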
        unsigned long ptr = __insn_mm((unsigned long)v >> 1,
                                      (unsigned long)atomic_locks,
                                      2, (ATOMIC_HASH_SHIFT + 2) - 1);
        return (int *)ptr;
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
        return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

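/*
 * Release a hashed lock on behalf of an atomic sequence that faulted
 * while holding it. (The asm helpers appear to store 1 in the lock
 * word while it is held, which is what the second BUG_ON checks.)
 */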
void __atomic_fault_unlock(int *irqlock_word)
{
        BUG_ON(!is_atomic_lock(irqlock_word));
        BUG_ON(*irqlock_word != 1);
        *irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
        /* Issue a load to the target to bring it into cache. */
        *(volatile int *)v;
        return __atomic_hashed_lock(v);
}
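
/*
 * The _atomic_xxx() routines below are the out-of-line, lock-based
 * implementations backing the kernel's 32-bit atomics here: each one
 * hashes the target address to a lock word, then runs the matching
 * __atomic32_xxx() assembly helper, whose return structure evidently
 * carries the pre-operation value of *v in .val.
 */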

int _atomic_xchg(int *v, int n)
{
        return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
        return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
        return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);

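/*
 * The 64-bit variants follow the same pattern, except that the
 * __atomic64_xxx() helpers return the old value directly rather than
 * wrapped in a structure.
 */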
long long _atomic64_xchg(long long *v, long long n)
{
        return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
        return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
        return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
        return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
        return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
        return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called. The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space. So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT. Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
        if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
                panic("Bad address used for kernel atomic op: %p\n", addr);
        return (struct __get_user) { .err = -EFAULT };
}


void __init __init_atomic_per_cpu(void)
{
        /* Validate power-of-two and "bigger than cpus" assumption */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
        BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

        /*
         * On TILEPro we prefer to use a single hash-for-home
         * page, since this means atomic operations are less
         * likely to encounter a TLB fault and thus should
         * in general perform faster. You may wish to disable
         * this in situations where few hash-for-home tiles
         * are configured.
         */
        BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

        /* The locks must all fit on one page. */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

        /*
         * We use the page offset of the atomic value's address as
         * an index into atomic_locks, excluding the low 3 bits.
         * That should not produce more indices than ATOMIC_HASH_SIZE.
         */
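        /*
         * (E.g. with 64 KB pages, PAGE_SIZE >> 3 == 8192, so
         * ATOMIC_HASH_SIZE must be at least 8192 for the check
         * below to pass.)
         */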
        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}