/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* See <asm/atomic_32.h> */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/*
 * A block of memory containing locks for atomic ops. Each instance of this
 * struct will be homed on a different CPU.
 */
struct atomic_locks_on_cpu {
        int lock[ATOMIC_HASH_L2_SIZE];
} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));

static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);

/* The locks we'll use until __init_atomic_per_cpu is called. */
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;

/* Hash into this vector to get a pointer to lock for the given atomic. */
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
        __write_once = {
        [0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks)
};

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

int *__atomic_hashed_lock(volatile void *v)
{
        /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
        unsigned long i =
                (unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
        unsigned long n = __insn_crc32_32(0, i);

        /* Grab high bits for L1 index. */
        unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
        /* Grab low bits for L2 index. */
        unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

        return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
        /*
         * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
         * Using mm works here because atomic_locks is page aligned.
         */
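        /*
         * Illustrative sketch only, assuming the "mm" masked-merge
         * instruction merges bits [2, ATOMIC_HASH_SHIFT + 1] of its
         * first operand into its second operand; under that assumption
         * the statement below behaves roughly like
         *
         *   mask = ((1UL << ATOMIC_HASH_SHIFT) - 1) << 2;
         *   ptr  = (unsigned long)atomic_locks |
         *          (((unsigned long)v >> 1) & mask);
         *
         * i.e. the lock is atomic_locks[((unsigned long)v >> 3) &
         * (ATOMIC_HASH_SIZE - 1)], assuming ATOMIC_HASH_SIZE is
         * 1 << ATOMIC_HASH_SHIFT.
         */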
        unsigned long ptr = __insn_mm((unsigned long)v >> 1,
                                      (unsigned long)atomic_locks,
                                      2, (ATOMIC_HASH_SHIFT + 2) - 1);
        return (int *)ptr;
#endif
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
        int i;
        for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {

                if (p >= &atomic_lock_ptr[i]->lock[0] &&
                    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
                        return 1;
                }
        }
        return 0;
#else
        return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}

void __atomic_fault_unlock(int *irqlock_word)
{
        BUG_ON(!is_atomic_lock(irqlock_word));
        BUG_ON(*irqlock_word != 1);
        *irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
        /* Issue a load to the target to bring it into cache. */
        *(volatile int *)v;
        return __atomic_hashed_lock(v);
}
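
/*
 * The __atomic_xchg()/__atomic_cmpxchg()/... helpers used below are
 * implemented outside this file, in assembly. A minimal conceptual
 * sketch of what each one does, with the hashed lock held (the real
 * routines also deal with interrupts and faults):
 *
 *   lock(lock_word);
 *   old = *p;
 *   *p = <new value derived from old and the arguments>;
 *   unlock(lock_word);
 *   return old (for the 32-bit ops, wrapped in a struct as .val);
 */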

int _atomic_xchg(atomic_t *v, int n)
{
        return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
        return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
                .val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
        return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
        return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
        return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
        return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);


u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
        return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
        return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
                                          u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
        return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);


/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called. The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space. So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT. Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
        if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
                panic("Bad address used for kernel atomic op: %p\n", addr);
        return (struct __get_user) { .err = -EFAULT };
}


#if CHIP_HAS_CBOX_HOME_MAP()
static int __init noatomichash(char *str)
{
        pr_warning("noatomichash is deprecated.\n");
        return 1;
}
__setup("noatomichash", noatomichash);
#endif

void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

        unsigned int i;
        int actual_cpu;

        /*
         * Before this is called from setup, we just have one lock for
         * all atomic objects/operations. Here we replace the
         * elements of atomic_lock_ptr so that they point at per_cpu
         * integers. This seemingly over-complex approach stems from
         * the fact that DEFINE_PER_CPU defines an entry for each cpu
         * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1. But
         * for efficient hashing of atomics to their locks we want a
         * compile time constant power of 2 for the size of this
         * table, so we use ATOMIC_HASH_SIZE.
         *
         * Here we populate atomic_lock_ptr from the per cpu
         * atomic_lock_pool, interspersing by actual cpu so that
         * subsequent elements are homed on consecutive cpus.
         */

        actual_cpu = cpumask_first(cpu_possible_mask);

        for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
                /*
                 * Preincrement to slightly bias against using cpu 0,
                 * which has plenty of stuff homed on it already.
                 */
                actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
                if (actual_cpu >= nr_cpu_ids)
                        actual_cpu = cpumask_first(cpu_possible_mask);

                atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
        }

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

        /* Validate power-of-two and "bigger than cpus" assumption */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
        BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

        /*
         * On TILEPro we prefer to use a single hash-for-home
         * page, since this means atomic operations are less
         * likely to encounter a TLB fault and thus should
         * in general perform faster. You may wish to disable
         * this in situations where few hash-for-home tiles
         * are configured.
         */
        BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

        /* The locks must all fit on one page. */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

        /*
         * We use the page offset of the atomic value's address as
         * an index into atomic_locks, excluding the low 3 bits.
         * That should not produce more indices than ATOMIC_HASH_SIZE.
         */
        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
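
        /*
         * Illustrative arithmetic only: the index described above is
         * (addr & (PAGE_SIZE - 1)) >> 3, which can take at most
         * PAGE_SIZE / 8 distinct values, and the BUILD_BUG_ON() above
         * requires that count to be no larger than ATOMIC_HASH_SIZE.
         */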

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
}