/*
 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
 */
#ifndef _ASM_POWERPC_MUTEX_H
#define _ASM_POWERPC_MUTEX_H

static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
{
	int t;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%1		# mutex trylock\n\
	cmpw	0,%0,%2\n\
	bne-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%3,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (old), "r" (new)
	: "cc", "memory");

	return t;
}
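
/*
 * Illustrative sketch (not part of the original header): the LL/SC loop
 * above behaves like the generic atomic_cmpxchg() helper, which stores
 * @new only if *v equalled @old and returns the value that was observed,
 * followed by (at least) acquire ordering. The _sketch name below is
 * hypothetical and only serves to show the equivalence.
 */
static inline int __mutex_cmpxchg_lock_sketch(atomic_t *v, int old, int new)
{
	/* Store @new only when *v == @old; return the value seen in *v. */
	return atomic_cmpxchg(v, old, new);
}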

static inline int __mutex_dec_return_lock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# mutex lock\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
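
/*
 * Illustrative sketch (not part of the original header): the sequence above
 * is an atomic decrement that returns the new count, with acquire ordering
 * so that accesses inside the critical section cannot float above a
 * successful lock. atomic_dec_return() is a stronger-ordered stand-in;
 * the _sketch name is hypothetical.
 */
static inline int __mutex_dec_return_lock_sketch(atomic_t *v)
{
	return atomic_dec_return(v);	/* full barriers imply acquire */
}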

static inline int __mutex_inc_return_unlock(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# mutex unlock\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
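
/*
 * Illustrative sketch (not part of the original header): the release
 * barrier before the increment makes everything done inside the critical
 * section visible before the count goes back up. atomic_inc_return() is
 * a stronger-ordered stand-in; the _sketch name is hypothetical.
 */
static inline int __mutex_inc_return_unlock_sketch(atomic_t *v)
{
	return atomic_inc_return(v);	/* full barriers imply release */
}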

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		fail_fn(count);
}
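
/*
 * Usage sketch (assumed, not part of this header): the generic mutex core
 * of this era invokes the fastpath roughly like this, falling back to the
 * arch-independent slowpath when the count was already taken:
 *
 *	void __sched mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 */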

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_dec_return_lock(count) < 0))
		return fail_fn(count);
	return 0;
}
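
/*
 * Usage sketch (assumed, not part of this header): the retval variant lets
 * the slowpath propagate an error, e.g. -EINTR from an interruptible sleep:
 *
 *	ret = __mutex_fastpath_lock_retval(&lock->count,
 *					   __mutex_lock_interruptible_slowpath);
 */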

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(__mutex_inc_return_unlock(count) <= 0))
		fail_fn(count);
}
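
/*
 * Usage sketch (assumed, not part of this header): a count that is still
 * not positive after the increment means waiters may be queued, so the
 * generic core falls back to a slowpath that wakes one of them, roughly:
 *
 *	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 */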

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0, and return 1 (success), or if the count
 * was not 1, then return 0 (failure).
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
		return 1;
	return 0;
}
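
/*
 * Usage sketch (assumed, not part of this header): mutex_trylock() maps
 * straight onto this fastpath and returns its 1/0 result, roughly:
 *
 *	return __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
 */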

#endif