/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
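	/*
	 * Equivalent plain C, as a sketch (the single mm bitfield-merge
	 * instruction below computes this in one operation):
	 *
	 *	return &atomic_locks[((unsigned long)v >> 3) &
	 *			     (ATOMIC_HASH_SIZE - 1)];
	 */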
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

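/*
 * Release a hashed atomic lock that was left held when an atomic
 * operation faulted, after checking that the pointer really is one
 * of our locks and that it is currently held.
 */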
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

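/* Bring the target into cache, then return the hashed lock guarding it. */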
static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

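/* Atomically set *v to n; return the old value. */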
int _atomic_xchg(int *v, int n)
{
	return __atomic_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

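/* Atomically add i to *v; return the old value. */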
int _atomic_xchg_add(int *v, int i)
{
	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

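/* Atomically add a to *v unless *v == u; return the old value. */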
int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

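/* Atomically set *v to n if *v == o; return the old value. */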
int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

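/* Each bitwise op below atomically updates *p and returns the old value. */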
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_and);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);


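/* The 64-bit variants follow the same pattern, guarded by the same locks. */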
long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_and(long long *v, long long n)
{
	return __atomic64_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_and);

long long _atomic64_or(long long *v, long long n)
{
	return __atomic64_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_or);

long long _atomic64_xor(long long *v, long long n)
{
	return __atomic64_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}


void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}