#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
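
/*
 * For illustration: movli.l/movco.l form a load-linked/
 * store-conditional pair, so each generated operation is morally
 * equivalent to this portable CAS loop (a sketch using the GCC
 * __atomic builtins, which this header does not use; on failure the
 * builtin reloads "old" and the loop retries):
 *
 *	static inline void atomic_add_sketch(int i, atomic_t *v)
 *	{
 *		int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
 *
 *		while (!__atomic_compare_exchange_n(&v->counter, &old,
 *						    old + i, false,
 *						    __ATOMIC_RELAXED,
 *						    __ATOMIC_RELAXED))
 *			;
 *	}
 *
 * The hardware version is stronger: movco.l fails whenever the
 * reservation set by movli.l is lost, rather than comparing values,
 * which also sidesteps ABA issues.
 */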

#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long tmp;					\
								\
	__asm__ __volatile__ (					\
"1:	movli.l @%2, %0		! atomic_" #op "\n"		\
"	" #op "	%1, %0				\n"		\
"	movco.l	%0, @%2				\n"		\
"	bf	1b				\n"		\
	: "=&z" (tmp)						\
	: "r" (i), "r" (&v->counter)				\
	: "t");							\
}

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long temp;					\
								\
	__asm__ __volatile__ (					\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"	\
"	" #op "	%1, %0					\n"	\
"	movco.l	%0, @%2					\n"	\
"	bf	1b					\n"	\
"	synco						\n"	\
	: "=&z" (temp)						\
	: "r" (i), "r" (&v->counter)				\
	: "t");							\
								\
	return temp;						\
}
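
/*
 * Note: only ATOMIC_OP_RETURN() ends with a synco, the SH-4A memory
 * barrier instruction. The trailing barrier gives the ordering that
 * value-returning atomics are expected to provide; the void
 * ATOMIC_OP() forms make no ordering guarantee.
 */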
50
Peter Zijlstrac6470152014-03-26 18:12:45 +010051#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
52
53ATOMIC_OPS(add)
54ATOMIC_OPS(sub)
55
56#undef ATOMIC_OPS
57#undef ATOMIC_OP_RETURN
58#undef ATOMIC_OP
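
/*
 * For reference, ATOMIC_OPS(add) above generates both:
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int atomic_add_return(int i, atomic_t *v);
 *
 * where atomic_add_return(i, v) assembles to (with %0 = temp, pinned
 * to r0 by the "z" constraint, %1 = i, %2 = &v->counter):
 *
 *	1:	movli.l	@%2, %0		! load-link v->counter
 *		add	%1, %0		! r0 += i
 *		movco.l	%0, @%2		! store-conditional, T=1 on success
 *		bf	1b		! reservation lost, retry
 *		synco			! barrier
 *
 * and simply returns r0, which already holds the new value; hence the
 * "for free" remark at the top of this file.
 */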

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}
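
/*
 * Usage sketch (the flag and structure below are hypothetical, not
 * part of this header):
 *
 *	#define MY_FLAG_BUSY	0x1			! hypothetical
 *
 *	atomic_set_mask(MY_FLAG_BUSY, &dev->flags);	! sets bit 0
 *	atomic_clear_mask(MY_FLAG_BUSY, &dev->flags);	! clears bit 0
 *
 * Both loop on movli.l/movco.l exactly like ATOMIC_OP() above, with
 * the mask (or its complement, for clearing) as the %1 input.
 */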

#endif /* __ASM_SH_ATOMIC_LLSC_H */