/* MN10300 Atomic counter operations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

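/*
 * The read-modify-write ops below are built on the MN10300 atomic
 * operations unit.  The following is a sketch inferred from the asm that
 * follows, not an authoritative hardware description: the target address
 * is armed by writing it to the _AAR register, the value is then loaded
 * and stored back through _ADR, and _ASR reads back non-zero if another
 * CPU interfered with the sequence, in which case the whole thing is
 * retried.  In C-like pseudocode:
 *
 *	do {
 *		AAR = &v->counter;	// arm the unit with the target
 *		val = ADR;		// load the current value
 *		val = val <op> i;	// apply the operation
 *		ADR = val;		// attempt the store back
 *	} while (ASR != 0);		// non-zero => interference, retry
 */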
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int retval, status;						\
									\
	asm volatile(							\
		"1:	mov	%4,(_AAR,%3)	\n"			\
		"	mov	(_ADR,%3),%1	\n"			\
		"	" #op "	%5,%1		\n"			\
		"	mov	%1,(_ADR,%3)	\n"			\
		"	mov	(_ADR,%3),%0	\n"	/* flush */	\
		"	mov	(_ASR,%3),%0	\n"			\
		"	or	%0,%0		\n"			\
		"	bne	1b		\n"			\
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)	\
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)	\
		: "memory", "cc");					\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int retval, status;						\
									\
	asm volatile(							\
		"1:	mov	%4,(_AAR,%3)	\n"			\
		"	mov	(_ADR,%3),%1	\n"			\
		"	" #op "	%5,%1		\n"			\
		"	mov	%1,(_ADR,%3)	\n"			\
		"	mov	(_ADR,%3),%0	\n"	/* flush */	\
		"	mov	(_ASR,%3),%0	\n"			\
		"	or	%0,%0		\n"			\
		"	bne	1b		\n"			\
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)	\
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)	\
		: "memory", "cc");					\
	return retval;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int retval, status;						\
									\
	asm volatile(							\
		"1:	mov	%4,(_AAR,%3)	\n"			\
		"	mov	(_ADR,%3),%1	\n"			\
		"	mov	%1,%0		\n"			\
		"	" #op "	%5,%0		\n"			\
		"	mov	%0,(_ADR,%3)	\n"			\
		"	mov	(_ADR,%3),%0	\n"	/* flush */	\
		"	mov	(_ASR,%3),%0	\n"			\
		"	or	%0,%0		\n"			\
		"	bne	1b		\n"			\
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)	\
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)	\
		: "memory", "cc");					\
	return retval;							\
}

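/*
 * Note the convention followed throughout the kernel's atomic API:
 * atomic_<op>_return() hands back the value *after* the operation has
 * been applied, whereas atomic_fetch_<op>() (above) hands back the value
 * that was read *before* the operation was applied.
 */
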
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

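/*
 * For illustration: ATOMIC_OPS(add) above expands to definitions of
 * atomic_add(), atomic_add_return() and atomic_fetch_add(), while the
 * bitwise ops (and/or/xor) are generated without a *_return variant,
 * which is all the generic atomic API requires of them.
 */
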
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

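/*
 * The *_and_test() helpers below test the value of the counter *after*
 * the operation has been applied, i.e. they are true exactly when the
 * operation leaves the counter at zero.
 */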
#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)

#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
	c;								\
})

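/*
 * __atomic_add_unless() (above) adds @a to @v unless @v currently holds
 * @u, and always yields the value it found beforehand.  A typical use,
 * illustrative only and not from this file, is taking a reference only
 * while the object is still live:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0)
 *		// got a reference; a count of zero was left untouched
 */
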
#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

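/*
 * atomic_xchg() and atomic_cmpxchg() above simply wrap the arch-level
 * xchg() and cmpxchg() primitives pulled in from <asm/cmpxchg.h> at the
 * top of this file.
 */
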
#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */