Paul Mundt | ec723fbe | 2006-12-07 20:33:38 +0900 | [diff] [blame] | 1 | #ifndef __ASM_SH_ATOMIC_LLSC_H |
| 2 | #define __ASM_SH_ATOMIC_LLSC_H |
| 3 | |
| 4 | /* |
Paul Mundt | ec723fbe | 2006-12-07 20:33:38 +0900 | [diff] [blame] | 5 | * SH-4A note: |
| 6 | * |
| 7 | * We basically get atomic_xxx_return() for free compared with |
| 8 | * atomic_xxx(). movli.l/movco.l require r0 due to the instruction |
| 9 | * encoding, so the retval is automatically set without having to |
| 10 | * do any special work. |
| 11 | */ |
/*
 * NOTE(review): the old blurb here ("branch forward to code at the end
 * of this object's .text section, then branch back") appears to have
 * been copied from another architecture's atomic code and does not
 * describe what happens below -- these LL/SC loops simply branch
 * backwards ("bf 1b") to retry when movco.l fails; there is no
 * out-of-line slow path.
 */
Paul Mundt | ec723fbe | 2006-12-07 20:33:38 +0900 | [diff] [blame] | 17 | |
/*
 * ATOMIC_OP(op) - generate "static inline void atomic_op(int i, atomic_t *v)"
 * which atomically applies <op> (e.g. add, sub, and) with operand i to
 * v->counter, discarding the result.
 *
 * The loop is a classic load-linked/store-conditional retry:
 *   movli.l  - load v->counter into r0 and arm the link/reservation
 *   <op>     - combine with i
 *   movco.l  - store conditionally; sets the T bit on success
 *   bf 1b    - T clear means the reservation was lost, so retry
 *
 * The "=&z" constraint forces the temporary into r0: movli.l/movco.l
 * implicitly encode r0, so no explicit move is needed (see the SH-4A
 * note at the top of this file).  Note there is no "synco" here --
 * unlike the _return/fetch_ variants below, the plain ops generated by
 * this macro provide no memory barrier.
 */
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long tmp;					\
								\
	__asm__ __volatile__ (					\
"1:	movli.l @%2, %0		! atomic_" #op "\n"		\
"	" #op "	%1, %0				\n"		\
"	movco.l	%0, @%2				\n"		\
"	bf	1b				\n"		\
	: "=&z" (tmp)						\
	: "r" (i), "r" (&v->counter)			\
	: "t");							\
}
| 32 | |
/*
 * ATOMIC_OP_RETURN(op) - generate
 * "static inline int atomic_op_return(int i, atomic_t *v)" which
 * atomically applies <op> to v->counter and returns the NEW value.
 *
 * Same movli.l/movco.l retry loop as ATOMIC_OP(), but because the
 * updated value is already sitting in r0 ("=&z") when the store
 * succeeds, returning it costs nothing extra.  The trailing "synco"
 * is a memory barrier, giving this variant the ordering the atomic_t
 * API requires of value-returning operations.
 */
#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long temp;					\
								\
	__asm__ __volatile__ (					\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"	\
"	" #op "	%1, %0					\n"	\
"	movco.l	%0, @%2					\n"	\
"	bf	1b					\n"	\
"	synco						\n"	\
	: "=&z" (temp)						\
	: "r" (i), "r" (&v->counter)			\
	: "t");							\
								\
	return temp;						\
}
| 50 | |
/*
 * ATOMIC_FETCH_OP(op) - generate
 * "static inline int atomic_fetch_op(int i, atomic_t *v)" which
 * atomically applies <op> to v->counter and returns the OLD value.
 *
 * The loaded value (in r0, "=&z") is copied to a scratch register
 * ("mov %0, %1") before <op> clobbers it, so the pre-update value
 * survives the store-conditional and can be returned.  The copy sits
 * inside the loop, so a retry re-snapshots the freshly loaded value.
 * "synco" makes this a fully ordered operation, as required for
 * fetch_* atomics.
 */
#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{								\
	unsigned long res, temp;				\
								\
	__asm__ __volatile__ (					\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"	\
"	mov %0, %1					\n"	\
"	" #op "	%2, %0					\n"	\
"	movco.l	%0, @%3					\n"	\
"	bf	1b					\n"	\
"	synco						\n"	\
	: "=&z" (temp), "=&r" (res)				\
	: "r" (i), "r" (&v->counter)			\
	: "t");							\
								\
	return res;						\
}
| 69 | |
/* Instantiate all three helpers (void, _return, fetch_) for an op. */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

/* add/sub get atomic_{add,sub}(), _return() and atomic_fetch_*(). */
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
/* Bitwise ops get no _return variant -- only the void and fetch_ forms. */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

/* Generator macros are internal to this header; don't leak them. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
| 86 | |
Paul Mundt | ec723fbe | 2006-12-07 20:33:38 +0900 | [diff] [blame] | 87 | #endif /* __ASM_SH_ATOMIC_LLSC_H */ |