blob: fa57cef12a4664acb29cf8780ed864dd52d68b0d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
Ralf Baechle102fa152007-02-16 17:18:50 +00006 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_BITOPS_H
10#define _ASM_BITOPS_H
11
Jiri Slaby06245172007-10-18 23:40:26 -070012#ifndef _LINUX_BITOPS_H
13#error only <linux/bitops.h> can be included directly
14#endif
15
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/compiler.h>
17#include <linux/types.h>
Ralf Baechle0004a9d2006-10-31 03:45:07 +000018#include <asm/barrier.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <asm/byteorder.h> /* sigh ... */
Maciej W. Rozyckib0984c42014-11-15 22:08:48 +000020#include <asm/compiler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <asm/cpu-features.h>
Ralf Baechle05490622016-04-15 10:25:33 +020022#include <asm/llsc.h>
Ralf Baechle4ffd8b32006-11-30 01:14:50 +000023#include <asm/sgidefs.h>
24#include <asm/war.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
Linus Torvalds1da177e2005-04-16 15:20:36 -070026/*
Jim Quinlan92d11592012-09-06 11:36:55 -040027 * These are the "slower" versions of the functions and are in bitops.c.
28 * These functions call raw_local_irq_{save,restore}().
29 */
30void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
31void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
32void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
33int __mips_test_and_set_bit(unsigned long nr,
34 volatile unsigned long *addr);
35int __mips_test_and_set_bit_lock(unsigned long nr,
36 volatile unsigned long *addr);
37int __mips_test_and_clear_bit(unsigned long nr,
38 volatile unsigned long *addr);
39int __mips_test_and_change_bit(unsigned long nr,
40 volatile unsigned long *addr);
41
42
Linus Torvalds1da177e2005-04-16 15:20:36 -070043/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070044 * set_bit - Atomically set a bit in memory
45 * @nr: the bit to set
46 * @addr: the address to start counting from
47 *
48 * This function is atomic and may not be reordered. See __set_bit()
49 * if you do not require the atomic guarantees.
50 * Note that @nr may be almost arbitrarily large; this function is not
51 * restricted to acting on a single-word quantity.
52 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	/* Word that holds the bit, and the bit's index within that word. */
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		/* R10000 errata workaround: retry via branch-likely (beqzl). */
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		/*
		 * NOTE(review): this branch still uses the older split
		 * "="-output plus separate input constraint for *m; the
		 * other functions in this file use a single "+" operand.
		 * Behaviorally equivalent here — confirm before unifying.
		 */
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		/* Constant bit on R2/R6: set it with ins, no mask register. */
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));	/* sc stored 0: reservation lost, retry */
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		/* Generic ll/sc loop; the retry happens in C. */
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		/* No ll/sc support: irq-disabling fallback in bitops.c. */
		__mips_set_bit(nr, addr);
}
94
95/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 * clear_bit - Clears a bit in memory
97 * @nr: Bit to clear
98 * @addr: Address to start counting from
99 *
100 * clear_bit() is atomic and may not be reordered. However, it does
101 * not contain a memory barrier, so if it is used for locking purposes,
Peter Zijlstra91bbefe2014-03-13 19:00:36 +0100102 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103 * in order to ensure changes are visible on other processors.
104 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	/* Word that holds the bit, and the bit's index within that word. */
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		/* R10000 errata workaround: retry via branch-likely (beqzl). */
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# clear_bit		\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		/* Constant bit on R2/R6: clear it by inserting $0 with ins. */
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));	/* sc stored 0: reservation lost, retry */
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		/* Generic ll/sc loop; the retry happens in C. */
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		/* No ll/sc support: irq-disabling fallback in bitops.c. */
		__mips_clear_bit(nr, addr);
}
146
147/*
Nick Piggin728697c2007-10-18 03:06:53 -0700148 * clear_bit_unlock - Clears a bit in memory
149 * @nr: Bit to clear
150 * @addr: Address to start counting from
151 *
152 * clear_bit() is atomic and implies release semantics before the memory
153 * operation. It can be used for an unlock.
154 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	/* Barrier before the atomic clear provides the release semantics. */
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
160
161/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 * change_bit - Toggle a bit in memory
163 * @nr: Bit to change
164 * @addr: Address to start counting from
165 *
166 * change_bit() is atomic and may not be reordered.
167 * Note that @nr may be almost arbitrarily large; this function is not
168 * restricted to acting on a single-word quantity.
169 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	/* Bit index within the target word. */
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* R10000 errata workaround: retry via branch-likely (beqzl). */
		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Generic ll/sc loop; the retry happens in C. */
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));	/* sc stored 0: retry */
	} else
		/* No ll/sc support: irq-disabling fallback in bitops.c. */
		__mips_change_bit(nr, addr);
}
204
205/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 * test_and_set_bit - Set a bit and return its old value
207 * @nr: Bit to set
208 * @addr: Address to count from
209 *
210 * This operation is atomic and cannot be reordered.
211 * It also implies a memory barrier.
212 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	/* Bit index within the target word; res holds the old bit value. */
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	/* Barrier before the ll/sc sequence: full-barrier semantics. */
	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* R10000 errata workaround: retry via branch-likely (beqzl). */
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Generic ll/sc loop; res doubles as the sc status here. */
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));	/* sc stored 0: retry */

		/* Extract the bit's previous value from the loaded word. */
		res = temp & (1UL << bit);
	} else
		/* No ll/sc support: irq-disabling fallback in bitops.c. */
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();	/* barrier after ll/sc completes the full barrier */

	return res != 0;
}
260
261/*
Nick Piggin728697c2007-10-18 03:06:53 -0700262 * test_and_set_bit_lock - Set a bit and return its old value
263 * @nr: Bit to set
264 * @addr: Address to count from
265 *
266 * This operation is atomic and implies acquire ordering semantics
267 * after the memory operation.
268 */
269static inline int test_and_set_bit_lock(unsigned long nr,
270 volatile unsigned long *addr)
271{
Jim Quinlan9de79c52012-09-06 11:36:54 -0400272 int bit = nr & SZLONG_MASK;
Nick Piggin728697c2007-10-18 03:06:53 -0700273 unsigned long res;
274
David Daneyb791d112009-07-13 11:15:19 -0700275 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Nick Piggin728697c2007-10-18 03:06:53 -0700276 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
277 unsigned long temp;
278
279 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200280 " .set arch=r4000 \n"
Nick Piggin728697c2007-10-18 03:06:53 -0700281 "1: " __LL "%0, %1 # test_and_set_bit \n"
282 " or %2, %0, %3 \n"
283 " " __SC "%2, %1 \n"
284 " beqzl %2, 1b \n"
285 " and %2, %0, %3 \n"
286 " .set mips0 \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100287 : "=&r" (temp), "+m" (*m), "=&r" (res)
288 : "r" (1UL << bit)
Nick Piggin728697c2007-10-18 03:06:53 -0700289 : "memory");
David Daneyb791d112009-07-13 11:15:19 -0700290 } else if (kernel_uses_llsc) {
Nick Piggin728697c2007-10-18 03:06:53 -0700291 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
292 unsigned long temp;
293
Ralf Baechle78373142010-10-29 19:08:24 +0100294 do {
295 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000296 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100297 " " __LL "%0, %1 # test_and_set_bit \n"
298 " or %2, %0, %3 \n"
299 " " __SC "%2, %1 \n"
300 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000301 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100302 : "r" (1UL << bit)
303 : "memory");
304 } while (unlikely(!res));
305
306 res = temp & (1UL << bit);
Jim Quinlan92d11592012-09-06 11:36:55 -0400307 } else
308 res = __mips_test_and_set_bit_lock(nr, addr);
Nick Piggin728697c2007-10-18 03:06:53 -0700309
310 smp_llsc_mb();
311
312 return res != 0;
313}
314/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315 * test_and_clear_bit - Clear a bit and return its old value
316 * @nr: Bit to clear
317 * @addr: Address to count from
318 *
319 * This operation is atomic and cannot be reordered.
320 * It also implies a memory barrier.
321 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	/* Bit index within the target word; res holds the old bit value. */
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	/* Barrier before the ll/sc sequence: full-barrier semantics. */
	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* R10000 errata workaround: retry via branch-likely (beqzl).
		 * or-then-xor builds the word with the bit cleared. */
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Constant bit on R2/R6: ext reads the old bit into res,
		 * ins clears it in place — no mask register needed. */
		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));	/* sc status is in temp here */
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Generic ll/sc loop; res doubles as the sc status here. */
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));	/* sc stored 0: retry */

		/* Extract the bit's previous value from the loaded word. */
		res = temp & (1UL << bit);
	} else
		/* No ll/sc support: irq-disabling fallback in bitops.c. */
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();	/* barrier after ll/sc completes the full barrier */

	return res != 0;
}
387
388/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 * test_and_change_bit - Change a bit and return its old value
390 * @nr: Bit to change
391 * @addr: Address to count from
392 *
393 * This operation is atomic and cannot be reordered.
394 * It also implies a memory barrier.
395 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	/* Bit index within the target word; res holds the old bit value. */
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	/* Barrier before the ll/sc sequence: full-barrier semantics. */
	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* R10000 errata workaround: retry via branch-likely (beqzl). */
		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Generic ll/sc loop; res doubles as the sc status here. */
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));	/* sc stored 0: retry */

		/* Extract the bit's previous value from the loaded word. */
		res = temp & (1UL << bit);
	} else
		/* No ll/sc support: irq-disabling fallback in bitops.c. */
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();	/* barrier after ll/sc completes the full barrier */

	return res != 0;
}
443
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800444#include <asm-generic/bitops/non-atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445
Ralf Baechle65903262005-07-12 12:50:30 +0000446/*
Nick Piggin728697c2007-10-18 03:06:53 -0700447 * __clear_bit_unlock - Clears a bit in memory
448 * @nr: Bit to clear
449 * @addr: Address to start counting from
450 *
451 * __clear_bit() is non-atomic and implies release semantics before the memory
452 * operation. It can be used for an unlock if no other CPUs can concurrently
453 * modify other bits in the word.
454 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	/* Release barrier, then a plain (non-atomic) clear: the caller
	 * guarantees no other CPU modifies bits in this word concurrently. */
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
}
460
461/*
Ralf Baechle65903262005-07-12 12:50:30 +0000462 * Return the bit position (0..63) of the most significant 1 bit in a word
463 * Returns -1 if no 1 bit exists
464 */
Ralf Baechle48162272008-10-28 09:40:35 +0000465static inline unsigned long __fls(unsigned long word)
Ralf Baechle65903262005-07-12 12:50:30 +0000466{
Ralf Baechle48162272008-10-28 09:40:35 +0000467 int num;
Ralf Baechle65903262005-07-12 12:50:30 +0000468
Maciej W. Rozyckicb5d4aa2015-04-03 23:25:00 +0100469 if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
Ralf Baechle47740eb2009-04-19 03:21:22 +0200470 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
Ralf Baechle49a89ef2007-10-11 23:46:15 +0100471 __asm__(
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100472 " .set push \n"
Markos Chandras87a927e2014-11-20 13:58:30 +0000473 " .set "MIPS_ISA_LEVEL" \n"
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100474 " clz %0, %1 \n"
475 " .set pop \n"
Ralf Baechle48162272008-10-28 09:40:35 +0000476 : "=r" (num)
477 : "r" (word));
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100478
Ralf Baechle48162272008-10-28 09:40:35 +0000479 return 31 - num;
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100480 }
481
Maciej W. Rozyckicb5d4aa2015-04-03 23:25:00 +0100482 if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
Ralf Baechle48162272008-10-28 09:40:35 +0000483 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
484 __asm__(
485 " .set push \n"
Markos Chandras87a927e2014-11-20 13:58:30 +0000486 " .set "MIPS_ISA_LEVEL" \n"
Ralf Baechle48162272008-10-28 09:40:35 +0000487 " dclz %0, %1 \n"
488 " .set pop \n"
489 : "=r" (num)
490 : "r" (word));
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100491
Ralf Baechle48162272008-10-28 09:40:35 +0000492 return 63 - num;
493 }
Ralf Baechle65903262005-07-12 12:50:30 +0000494
Ralf Baechle48162272008-10-28 09:40:35 +0000495 num = BITS_PER_LONG - 1;
496
497#if BITS_PER_LONG == 64
498 if (!(word & (~0ul << 32))) {
499 num -= 32;
500 word <<= 32;
501 }
502#endif
503 if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
504 num -= 16;
505 word <<= 16;
506 }
507 if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
508 num -= 8;
509 word <<= 8;
510 }
511 if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
512 num -= 4;
513 word <<= 4;
514 }
515 if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
516 num -= 2;
517 word <<= 2;
518 }
519 if (!(word & (~0ul << (BITS_PER_LONG-1))))
520 num -= 1;
521 return num;
Ralf Baechle65903262005-07-12 12:50:30 +0000522}
Ralf Baechle65903262005-07-12 12:50:30 +0000523
524/*
525 * __ffs - find first bit in word.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526 * @word: The word to search
527 *
Ralf Baechle65903262005-07-12 12:50:30 +0000528 * Returns 0..SZLONG-1
529 * Undefined if no bit exists, so code should check against 0 first.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 */
static inline unsigned long __ffs(unsigned long word)
{
	/* Isolate the lowest set bit, then locate it with __fls(). */
	unsigned long lowest = word & -word;

	return __fls(lowest);
}
535
536/*
Atsushi Nemotobc818242006-04-17 21:19:12 +0900537 * fls - find last bit set.
538 * @word: The word to search
539 *
540 * This is defined the same way as ffs.
541 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
542 */
Ralf Baechle48162272008-10-28 09:40:35 +0000543static inline int fls(int x)
Atsushi Nemotobc818242006-04-17 21:19:12 +0900544{
Ralf Baechle48162272008-10-28 09:40:35 +0000545 int r;
Atsushi Nemotobc818242006-04-17 21:19:12 +0900546
Maciej W. Rozyckicb5d4aa2015-04-03 23:25:00 +0100547 if (!__builtin_constant_p(x) &&
548 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
Maciej W. Rozyckidb873132014-06-29 00:26:20 +0100549 __asm__(
550 " .set push \n"
Markos Chandras87a927e2014-11-20 13:58:30 +0000551 " .set "MIPS_ISA_LEVEL" \n"
Maciej W. Rozyckidb873132014-06-29 00:26:20 +0100552 " clz %0, %1 \n"
553 " .set pop \n"
554 : "=r" (x)
555 : "r" (x));
Ralf Baechle48162272008-10-28 09:40:35 +0000556
557 return 32 - x;
558 }
559
560 r = 32;
561 if (!x)
562 return 0;
563 if (!(x & 0xffff0000u)) {
564 x <<= 16;
565 r -= 16;
566 }
567 if (!(x & 0xff000000u)) {
568 x <<= 8;
569 r -= 8;
570 }
571 if (!(x & 0xf0000000u)) {
572 x <<= 4;
573 r -= 4;
574 }
575 if (!(x & 0xc0000000u)) {
576 x <<= 2;
577 r -= 2;
578 }
579 if (!(x & 0x80000000u)) {
580 x <<= 1;
581 r -= 1;
582 }
583 return r;
Atsushi Nemotobc818242006-04-17 21:19:12 +0900584}
585
Atsushi Nemotobc818242006-04-17 21:19:12 +0900586#include <asm-generic/bitops/fls64.h>
Atsushi Nemotobc818242006-04-17 21:19:12 +0900587
588/*
Ralf Baechle65903262005-07-12 12:50:30 +0000589 * ffs - find first bit set.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 * @word: The word to search
591 *
Atsushi Nemotobc818242006-04-17 21:19:12 +0900592 * This is defined the same way as
593 * the libc and compiler builtin ffs routines, therefore
594 * differs in spirit from the above ffz (man ffs).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 */
static inline int ffs(int word)
{
	/* ffs(0) is 0 by convention; otherwise find the lowest set bit. */
	return word ? fls(word & -word) : 0;
}
Ralf Baechle2caf1902006-01-30 17:14:41 +0000603
Atsushi Nemotobc818242006-04-17 21:19:12 +0900604#include <asm-generic/bitops/ffz.h>
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800605#include <asm-generic/bitops/find.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606
607#ifdef __KERNEL__
608
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800609#include <asm-generic/bitops/sched.h>
David Daney1a403d12010-06-25 16:46:07 -0700610
611#include <asm/arch_hweight.h>
612#include <asm-generic/bitops/const_hweight.h>
613
Akinobu Mita861b5ae2011-03-23 16:42:02 -0700614#include <asm-generic/bitops/le.h>
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800615#include <asm-generic/bitops/ext2-atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616
617#endif /* __KERNEL__ */
618
619#endif /* _ASM_BITOPS_H */