/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
 * Word-size dependent helpers: log2 of bits-per-long, the in-word bit
 * mask, and the ll/sc (and ins/ext) mnemonics for the native word size.
 */
#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define __INS		"ins	"
#define __EXT		"ext	"
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define __INS		"dins	"
#define __EXT		"dext	"
#endif
/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

Linus Torvalds1da177e2005-04-16 15:20:36 -070058/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070059 * set_bit - Atomically set a bit in memory
60 * @nr: the bit to set
61 * @addr: the address to start counting from
62 *
63 * This function is atomic and may not be reordered. See __set_bit()
64 * if you do not require the atomic guarantees.
65 * Note that @nr may be almost arbitrarily large; this function is not
66 * restricted to acting on a single-word quantity.
67 */
68static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
69{
70 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Jim Quinlan9de79c52012-09-06 11:36:54 -040071 int bit = nr & SZLONG_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070072 unsigned long temp;
73
David Daneyb791d112009-07-13 11:15:19 -070074 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070075 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +020076 " .set arch=r4000 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 "1: " __LL "%0, %1 # set_bit \n"
78 " or %0, %2 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +000079 " " __SC "%0, %1 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 " beqzl %0, 1b \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +000081 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +000082 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
83 : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
Markos Chandras87a927e2014-11-20 13:58:30 +000084#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
David Daneyb791d112009-07-13 11:15:19 -070085 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
Ralf Baechle78373142010-10-29 19:08:24 +010086 do {
87 __asm__ __volatile__(
88 " " __LL "%0, %1 # set_bit \n"
89 " " __INS "%0, %3, %2, 1 \n"
90 " " __SC "%0, %1 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +000091 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +010092 : "ir" (bit), "r" (~0));
93 } while (unlikely(!temp));
Markos Chandras87a927e2014-11-20 13:58:30 +000094#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
David Daneyb791d112009-07-13 11:15:19 -070095 } else if (kernel_uses_llsc) {
Ralf Baechle78373142010-10-29 19:08:24 +010096 do {
97 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +000098 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle78373142010-10-29 19:08:24 +010099 " " __LL "%0, %1 # set_bit \n"
100 " or %0, %2 \n"
101 " " __SC "%0, %1 \n"
102 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000103 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +0100104 : "ir" (1UL << bit));
105 } while (unlikely(!temp));
Jim Quinlan92d11592012-09-06 11:36:55 -0400106 } else
107 __mips_set_bit(nr, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108}
109
110/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 * clear_bit - Clears a bit in memory
112 * @nr: Bit to clear
113 * @addr: Address to start counting from
114 *
115 * clear_bit() is atomic and may not be reordered. However, it does
116 * not contain a memory barrier, so if it is used for locking purposes,
Peter Zijlstra91bbefe2014-03-13 19:00:36 +0100117 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 * in order to ensure changes are visible on other processors.
119 */
120static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
121{
122 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Jim Quinlan9de79c52012-09-06 11:36:54 -0400123 int bit = nr & SZLONG_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700124 unsigned long temp;
125
David Daneyb791d112009-07-13 11:15:19 -0700126 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200128 " .set arch=r4000 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129 "1: " __LL "%0, %1 # clear_bit \n"
130 " and %0, %2 \n"
131 " " __SC "%0, %1 \n"
132 " beqzl %0, 1b \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000133 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000134 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +0100135 : "ir" (~(1UL << bit)));
Markos Chandras87a927e2014-11-20 13:58:30 +0000136#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
David Daneyb791d112009-07-13 11:15:19 -0700137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
Ralf Baechle78373142010-10-29 19:08:24 +0100138 do {
139 __asm__ __volatile__(
140 " " __LL "%0, %1 # clear_bit \n"
141 " " __INS "%0, $0, %2, 1 \n"
142 " " __SC "%0, %1 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000143 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +0100144 : "ir" (bit));
145 } while (unlikely(!temp));
Markos Chandras87a927e2014-11-20 13:58:30 +0000146#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
David Daneyb791d112009-07-13 11:15:19 -0700147 } else if (kernel_uses_llsc) {
Ralf Baechle78373142010-10-29 19:08:24 +0100148 do {
149 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000150 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100151 " " __LL "%0, %1 # clear_bit \n"
152 " and %0, %2 \n"
153 " " __SC "%0, %1 \n"
154 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000155 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +0100156 : "ir" (~(1UL << bit)));
157 } while (unlikely(!temp));
Jim Quinlan92d11592012-09-06 11:36:55 -0400158 } else
159 __mips_clear_bit(nr, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160}
161
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	/* Release ordering: make prior stores visible before the clear. */
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
175
176/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 * change_bit - Toggle a bit in memory
178 * @nr: Bit to change
179 * @addr: Address to start counting from
180 *
181 * change_bit() is atomic and may not be reordered.
182 * Note that @nr may be almost arbitrarily large; this function is not
183 * restricted to acting on a single-word quantity.
184 */
185static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
186{
Jim Quinlan9de79c52012-09-06 11:36:54 -0400187 int bit = nr & SZLONG_MASK;
Ralf Baechleb9611532007-03-05 00:56:15 +0000188
David Daneyb791d112009-07-13 11:15:19 -0700189 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
191 unsigned long temp;
192
193 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200194 " .set arch=r4000 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 "1: " __LL "%0, %1 # change_bit \n"
196 " xor %0, %2 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000197 " " __SC "%0, %1 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 " beqzl %0, 1b \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000199 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000200 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +0100201 : "ir" (1UL << bit));
David Daneyb791d112009-07-13 11:15:19 -0700202 } else if (kernel_uses_llsc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
204 unsigned long temp;
205
Ralf Baechle78373142010-10-29 19:08:24 +0100206 do {
207 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000208 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100209 " " __LL "%0, %1 # change_bit \n"
210 " xor %0, %2 \n"
211 " " __SC "%0, %1 \n"
212 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000213 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
Ralf Baechle78373142010-10-29 19:08:24 +0100214 : "ir" (1UL << bit));
215 } while (unlikely(!temp));
Jim Quinlan92d11592012-09-06 11:36:55 -0400216 } else
217 __mips_change_bit(nr, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218}
219
220/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 * test_and_set_bit - Set a bit and return its old value
222 * @nr: Bit to set
223 * @addr: Address to count from
224 *
225 * This operation is atomic and cannot be reordered.
226 * It also implies a memory barrier.
227 */
228static inline int test_and_set_bit(unsigned long nr,
229 volatile unsigned long *addr)
230{
Jim Quinlan9de79c52012-09-06 11:36:54 -0400231 int bit = nr & SZLONG_MASK;
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100232 unsigned long res;
Ralf Baechleb9611532007-03-05 00:56:15 +0000233
David Daneyf252ffd2010-01-08 17:17:43 -0800234 smp_mb__before_llsc();
Nick Pigginc8f30ae2007-10-18 03:06:52 -0700235
David Daneyb791d112009-07-13 11:15:19 -0700236 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100238 unsigned long temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239
240 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200241 " .set arch=r4000 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 "1: " __LL "%0, %1 # test_and_set_bit \n"
243 " or %2, %0, %3 \n"
244 " " __SC "%2, %1 \n"
245 " beqzl %2, 1b \n"
246 " and %2, %0, %3 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000247 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000248 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100249 : "r" (1UL << bit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250 : "memory");
David Daneyb791d112009-07-13 11:15:19 -0700251 } else if (kernel_uses_llsc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100253 unsigned long temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
Ralf Baechle78373142010-10-29 19:08:24 +0100255 do {
256 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000257 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100258 " " __LL "%0, %1 # test_and_set_bit \n"
259 " or %2, %0, %3 \n"
260 " " __SC "%2, %1 \n"
261 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000262 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100263 : "r" (1UL << bit)
264 : "memory");
265 } while (unlikely(!res));
266
267 res = temp & (1UL << bit);
Jim Quinlan92d11592012-09-06 11:36:55 -0400268 } else
269 res = __mips_test_and_set_bit(nr, addr);
Ralf Baechle0004a9d2006-10-31 03:45:07 +0000270
Ralf Baechle17099b12007-07-14 13:24:05 +0100271 smp_llsc_mb();
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100272
273 return res != 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274}
275
276/*
Nick Piggin728697c2007-10-18 03:06:53 -0700277 * test_and_set_bit_lock - Set a bit and return its old value
278 * @nr: Bit to set
279 * @addr: Address to count from
280 *
281 * This operation is atomic and implies acquire ordering semantics
282 * after the memory operation.
283 */
284static inline int test_and_set_bit_lock(unsigned long nr,
285 volatile unsigned long *addr)
286{
Jim Quinlan9de79c52012-09-06 11:36:54 -0400287 int bit = nr & SZLONG_MASK;
Nick Piggin728697c2007-10-18 03:06:53 -0700288 unsigned long res;
289
David Daneyb791d112009-07-13 11:15:19 -0700290 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Nick Piggin728697c2007-10-18 03:06:53 -0700291 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
292 unsigned long temp;
293
294 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200295 " .set arch=r4000 \n"
Nick Piggin728697c2007-10-18 03:06:53 -0700296 "1: " __LL "%0, %1 # test_and_set_bit \n"
297 " or %2, %0, %3 \n"
298 " " __SC "%2, %1 \n"
299 " beqzl %2, 1b \n"
300 " and %2, %0, %3 \n"
301 " .set mips0 \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100302 : "=&r" (temp), "+m" (*m), "=&r" (res)
303 : "r" (1UL << bit)
Nick Piggin728697c2007-10-18 03:06:53 -0700304 : "memory");
David Daneyb791d112009-07-13 11:15:19 -0700305 } else if (kernel_uses_llsc) {
Nick Piggin728697c2007-10-18 03:06:53 -0700306 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
307 unsigned long temp;
308
Ralf Baechle78373142010-10-29 19:08:24 +0100309 do {
310 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000311 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100312 " " __LL "%0, %1 # test_and_set_bit \n"
313 " or %2, %0, %3 \n"
314 " " __SC "%2, %1 \n"
315 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000316 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100317 : "r" (1UL << bit)
318 : "memory");
319 } while (unlikely(!res));
320
321 res = temp & (1UL << bit);
Jim Quinlan92d11592012-09-06 11:36:55 -0400322 } else
323 res = __mips_test_and_set_bit_lock(nr, addr);
Nick Piggin728697c2007-10-18 03:06:53 -0700324
325 smp_llsc_mb();
326
327 return res != 0;
328}
329/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 * test_and_clear_bit - Clear a bit and return its old value
331 * @nr: Bit to clear
332 * @addr: Address to count from
333 *
334 * This operation is atomic and cannot be reordered.
335 * It also implies a memory barrier.
336 */
337static inline int test_and_clear_bit(unsigned long nr,
338 volatile unsigned long *addr)
339{
Jim Quinlan9de79c52012-09-06 11:36:54 -0400340 int bit = nr & SZLONG_MASK;
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100341 unsigned long res;
Ralf Baechleb9611532007-03-05 00:56:15 +0000342
David Daneyf252ffd2010-01-08 17:17:43 -0800343 smp_mb__before_llsc();
Nick Pigginc8f30ae2007-10-18 03:06:52 -0700344
David Daneyb791d112009-07-13 11:15:19 -0700345 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Atsushi Nemoto8e09ffb2007-06-14 00:56:31 +0900347 unsigned long temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348
349 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200350 " .set arch=r4000 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 "1: " __LL "%0, %1 # test_and_clear_bit \n"
352 " or %2, %0, %3 \n"
353 " xor %2, %3 \n"
Ralf Baechle70342282013-01-22 12:59:30 +0100354 " " __SC "%2, %1 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 " beqzl %2, 1b \n"
356 " and %2, %0, %3 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000357 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000358 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100359 : "r" (1UL << bit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 : "memory");
Markos Chandras87a927e2014-11-20 13:58:30 +0000361#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
David Daneyb791d112009-07-13 11:15:19 -0700362 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
Ralf Baechle102fa152007-02-16 17:18:50 +0000363 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100364 unsigned long temp;
Ralf Baechle102fa152007-02-16 17:18:50 +0000365
Ralf Baechle78373142010-10-29 19:08:24 +0100366 do {
367 __asm__ __volatile__(
Ralf Baechle70342282013-01-22 12:59:30 +0100368 " " __LL "%0, %1 # test_and_clear_bit \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100369 " " __EXT "%2, %0, %3, 1 \n"
Ralf Baechle70342282013-01-22 12:59:30 +0100370 " " __INS "%0, $0, %3, 1 \n"
371 " " __SC "%0, %1 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000372 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100373 : "ir" (bit)
374 : "memory");
375 } while (unlikely(!temp));
Ralf Baechle102fa152007-02-16 17:18:50 +0000376#endif
David Daneyb791d112009-07-13 11:15:19 -0700377 } else if (kernel_uses_llsc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100379 unsigned long temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380
Ralf Baechle78373142010-10-29 19:08:24 +0100381 do {
382 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000383 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle70342282013-01-22 12:59:30 +0100384 " " __LL "%0, %1 # test_and_clear_bit \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100385 " or %2, %0, %3 \n"
386 " xor %2, %3 \n"
Ralf Baechle70342282013-01-22 12:59:30 +0100387 " " __SC "%2, %1 \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100388 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000389 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100390 : "r" (1UL << bit)
391 : "memory");
392 } while (unlikely(!res));
393
394 res = temp & (1UL << bit);
Jim Quinlan92d11592012-09-06 11:36:55 -0400395 } else
396 res = __mips_test_and_clear_bit(nr, addr);
Ralf Baechle0004a9d2006-10-31 03:45:07 +0000397
Ralf Baechle17099b12007-07-14 13:24:05 +0100398 smp_llsc_mb();
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100399
400 return res != 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401}
402
403/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 * test_and_change_bit - Change a bit and return its old value
405 * @nr: Bit to change
406 * @addr: Address to count from
407 *
408 * This operation is atomic and cannot be reordered.
409 * It also implies a memory barrier.
410 */
411static inline int test_and_change_bit(unsigned long nr,
412 volatile unsigned long *addr)
413{
Jim Quinlan9de79c52012-09-06 11:36:54 -0400414 int bit = nr & SZLONG_MASK;
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100415 unsigned long res;
Ralf Baechleb9611532007-03-05 00:56:15 +0000416
David Daneyf252ffd2010-01-08 17:17:43 -0800417 smp_mb__before_llsc();
Nick Pigginc8f30ae2007-10-18 03:06:52 -0700418
David Daneyb791d112009-07-13 11:15:19 -0700419 if (kernel_uses_llsc && R10000_LLSC_WAR) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100421 unsigned long temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422
423 __asm__ __volatile__(
Ralf Baechlea809d462014-03-30 13:20:10 +0200424 " .set arch=r4000 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000425 "1: " __LL "%0, %1 # test_and_change_bit \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 " xor %2, %0, %3 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000427 " " __SC "%2, %1 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 " beqzl %2, 1b \n"
429 " and %2, %0, %3 \n"
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +0000430 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000431 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100432 : "r" (1UL << bit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433 : "memory");
David Daneyb791d112009-07-13 11:15:19 -0700434 } else if (kernel_uses_llsc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100436 unsigned long temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700437
Ralf Baechle78373142010-10-29 19:08:24 +0100438 do {
439 __asm__ __volatile__(
Markos Chandras87a927e2014-11-20 13:58:30 +0000440 " .set "MIPS_ISA_ARCH_LEVEL" \n"
Ralf Baechle70342282013-01-22 12:59:30 +0100441 " " __LL "%0, %1 # test_and_change_bit \n"
Ralf Baechle78373142010-10-29 19:08:24 +0100442 " xor %2, %0, %3 \n"
443 " " __SC "\t%2, %1 \n"
444 " .set mips0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000445 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
Ralf Baechle78373142010-10-29 19:08:24 +0100446 : "r" (1UL << bit)
447 : "memory");
448 } while (unlikely(!res));
449
450 res = temp & (1UL << bit);
Jim Quinlan92d11592012-09-06 11:36:55 -0400451 } else
452 res = __mips_test_and_change_bit(nr, addr);
Ralf Baechle0004a9d2006-10-31 03:45:07 +0000453
Ralf Baechle17099b12007-07-14 13:24:05 +0100454 smp_llsc_mb();
Ralf Baechleff72b7a2007-06-07 13:17:30 +0100455
456 return res != 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457}
458
#include <asm-generic/bitops/non-atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	/* Full barrier supplies the release ordering for the plain store. */
	smp_mb();
	__clear_bit(nr, addr);
}
475
476/*
Ralf Baechle65903262005-07-12 12:50:30 +0000477 * Return the bit position (0..63) of the most significant 1 bit in a word
478 * Returns -1 if no 1 bit exists
479 */
Ralf Baechle48162272008-10-28 09:40:35 +0000480static inline unsigned long __fls(unsigned long word)
Ralf Baechle65903262005-07-12 12:50:30 +0000481{
Ralf Baechle48162272008-10-28 09:40:35 +0000482 int num;
Ralf Baechle65903262005-07-12 12:50:30 +0000483
Maciej W. Rozyckicb5d4aa2015-04-03 23:25:00 +0100484 if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
Ralf Baechle47740eb2009-04-19 03:21:22 +0200485 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
Ralf Baechle49a89ef2007-10-11 23:46:15 +0100486 __asm__(
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100487 " .set push \n"
Markos Chandras87a927e2014-11-20 13:58:30 +0000488 " .set "MIPS_ISA_LEVEL" \n"
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100489 " clz %0, %1 \n"
490 " .set pop \n"
Ralf Baechle48162272008-10-28 09:40:35 +0000491 : "=r" (num)
492 : "r" (word));
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100493
Ralf Baechle48162272008-10-28 09:40:35 +0000494 return 31 - num;
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100495 }
496
Maciej W. Rozyckicb5d4aa2015-04-03 23:25:00 +0100497 if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
Ralf Baechle48162272008-10-28 09:40:35 +0000498 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
499 __asm__(
500 " .set push \n"
Markos Chandras87a927e2014-11-20 13:58:30 +0000501 " .set "MIPS_ISA_LEVEL" \n"
Ralf Baechle48162272008-10-28 09:40:35 +0000502 " dclz %0, %1 \n"
503 " .set pop \n"
504 : "=r" (num)
505 : "r" (word));
Ralf Baechleec917c2c2005-10-07 16:58:15 +0100506
Ralf Baechle48162272008-10-28 09:40:35 +0000507 return 63 - num;
508 }
Ralf Baechle65903262005-07-12 12:50:30 +0000509
Ralf Baechle48162272008-10-28 09:40:35 +0000510 num = BITS_PER_LONG - 1;
511
512#if BITS_PER_LONG == 64
513 if (!(word & (~0ul << 32))) {
514 num -= 32;
515 word <<= 32;
516 }
517#endif
518 if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
519 num -= 16;
520 word <<= 16;
521 }
522 if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
523 num -= 8;
524 word <<= 8;
525 }
526 if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
527 num -= 4;
528 word <<= 4;
529 }
530 if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
531 num -= 2;
532 word <<= 2;
533 }
534 if (!(word & (~0ul << (BITS_PER_LONG-1))))
535 num -= 1;
536 return num;
Ralf Baechle65903262005-07-12 12:50:30 +0000537}
Ralf Baechle65903262005-07-12 12:50:30 +0000538
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* Isolate the lowest set bit, then find its position. */
	return __fls(word & -word);
}
550
551/*
Atsushi Nemotobc818242006-04-17 21:19:12 +0900552 * fls - find last bit set.
553 * @word: The word to search
554 *
555 * This is defined the same way as ffs.
556 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
557 */
Ralf Baechle48162272008-10-28 09:40:35 +0000558static inline int fls(int x)
Atsushi Nemotobc818242006-04-17 21:19:12 +0900559{
Ralf Baechle48162272008-10-28 09:40:35 +0000560 int r;
Atsushi Nemotobc818242006-04-17 21:19:12 +0900561
Maciej W. Rozyckicb5d4aa2015-04-03 23:25:00 +0100562 if (!__builtin_constant_p(x) &&
563 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
Maciej W. Rozyckidb873132014-06-29 00:26:20 +0100564 __asm__(
565 " .set push \n"
Markos Chandras87a927e2014-11-20 13:58:30 +0000566 " .set "MIPS_ISA_LEVEL" \n"
Maciej W. Rozyckidb873132014-06-29 00:26:20 +0100567 " clz %0, %1 \n"
568 " .set pop \n"
569 : "=r" (x)
570 : "r" (x));
Ralf Baechle48162272008-10-28 09:40:35 +0000571
572 return 32 - x;
573 }
574
575 r = 32;
576 if (!x)
577 return 0;
578 if (!(x & 0xffff0000u)) {
579 x <<= 16;
580 r -= 16;
581 }
582 if (!(x & 0xff000000u)) {
583 x <<= 8;
584 r -= 8;
585 }
586 if (!(x & 0xf0000000u)) {
587 x <<= 4;
588 r -= 4;
589 }
590 if (!(x & 0xc0000000u)) {
591 x <<= 2;
592 r -= 2;
593 }
594 if (!(x & 0x80000000u)) {
595 x <<= 1;
596 r -= 1;
597 }
598 return r;
Atsushi Nemotobc818242006-04-17 21:19:12 +0900599}
600
#include <asm-generic/bitops/fls64.h>
Atsushi Nemotobc818242006-04-17 21:19:12 +0900602
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	/* Isolate the lowest set bit and locate it with fls(). */
	return fls(word & -word);
}
Ralf Baechle2caf1902006-01-30 17:14:41 +0000618
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */