/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <linux/atomic.h>

/* See <asm/bitops.h> for API comments. */

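/*
 * On TILE-Gx, set_bit() and clear_bit() each compile down to a single
 * atomic fetch-or/fetch-and memory operation via the __insn_fetchor()
 * and __insn_fetchand() compiler intrinsics; no retry loop is needed
 * and, per the bitops API, no memory barrier is implied.
 */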
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}

static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}

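/*
 * Since clear_bit() does not itself imply a memory barrier, callers
 * that need ordering around it must use these hooks, which on tile
 * are full memory fences.
 */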
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()


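/*
 * There is no fetch-xor primitive to flip a bit directly, so
 * change_bit() emulates an atomic xor with an atomic64_cmpxchg()
 * guess loop: read the word, try to swap in (guess ^ mask), and
 * retry if another CPU modified the word in the meantime.
 */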
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
}


/*
 * The test_and_xxx_bit() routines require a memory fence before we
 * start the operation, and after the operation completes.  We use
 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
 * barrier(), to block until the atomic op is complete.
 */

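/*
 * For illustration only, a minimal sketch of a caller relying on the
 * full-fence semantics above (the LOCK_BIT name and "flags" word are
 * hypothetical, not part of this API):
 *
 *	while (test_and_set_bit(LOCK_BIT, &flags))
 *		cpu_relax();		(spin: old bit was already 1)
 *	... critical section, ordered by the fences above ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &flags);	(release the bit)
 */
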
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
	       & mask) != 0;
	barrier();
	return val;
}


static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
	       & mask) != 0;
	barrier();
	return val;
}


static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
	return (oldval & mask) != 0;
}

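/*
 * The generic header below builds ext2_set_bit_atomic() and
 * ext2_clear_bit_atomic() on top of the test_and_set_bit() and
 * test_and_clear_bit() routines above, so no per-filesystem
 * spinlock is required.
 */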
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _ASM_TILE_BITOPS_64_H */