/*
 * arch/arm/include/asm/mutex.h
 *
 * ARM optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#if __LINUX_ARM_ARCH__ < 6
/* On pre-ARMv6 hardware the swp-based implementation is the most efficient. */
# include <asm-generic/mutex-xchg.h>
#else

/*
 * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
 * atomic decrement (it is not a reliable atomic decrement, but it
 * satisfies the defined semantics for our purpose, while being smaller
 * and faster than a real atomic decrement or atomic swap). The idea is
 * to attempt decrementing the lock value only once. If, once
 * decremented, it isn't zero, or if its store-back fails due to a
 * dispute over the exclusive store, we simply bail out immediately
 * through the slow path, where the lock will be reattempted until it
 * succeeds.
 */
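/*
 * A rough C-level sketch of the fast path below (illustrative only;
 * store_exclusive() is a hypothetical stand-in for the ldrex/strex
 * exclusive-monitor pair, which plain C cannot express):
 *
 *	old = count->counter;			// ldrex
 *	new = old - 1;				// one decrement attempt
 *	failed = store_exclusive(new);		// strex: 0 iff it won
 *	if (new != 0 || failed)			// contended or store lost
 *		fail_fn(count);			// defer to the slow path
 */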
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);
	else
		smp_rmb();
}

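/*
 * Same fast path as above, but the outcome is returned to the caller:
 * 0 when the lock was taken here, otherwise whatever fail_fn() returns
 * (this mirrors the asm-generic fastpath contract).
 */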
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);
	else
		smp_rmb();

	return __res;
}

/*
 * The same trick is used for the unlock fast path. However, the
 * original value, rather than the result, is used to test for success
 * in order to get better generated assembly.
 */
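/*
 * A matching sketch for the unlock fast path below (same caveats and
 * the same hypothetical store_exclusive() as for the lock path):
 *
 *	orig = count->counter;			// ldrex
 *	res = orig + 1;				// one increment attempt
 *	failed = store_exclusive(res);		// strex
 *	if (orig != 0 || failed)		// waiters, or store lost
 *		fail_fn(count);			// slow path wakes waiters
 */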
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	smp_wmb();
	__asm__ (

		"ldrex	%0, [%3]	\n\t"
		"add	%1, %0, #1	\n\t"
		"strex	%2, %1, [%3]	"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__orig |= __ex_flag;
	if (unlikely(__orig != 0))
		fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply
 * fails, then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1
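/*
 * Returning 1 tells the generic layer that a failed fast-path unlock
 * leaves the lock word untouched, so the slow path must release the
 * lock itself (see the use of this macro in kernel/mutex.c).
 */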

/*
 * For __mutex_fastpath_trylock we use another construct which could be
 * described as a "single value cmpxchg".
 *
 * This provides the needed trylock semantics like cmpxchg would, but it
 * is lighter and less generic than a true cmpxchg implementation.
 */
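/*
 * A sketch of the loop below (illustrative; the real code folds these
 * tests into the condition flags of a single subs):
 *
 *	do {
 *		orig = count->counter;		// ldrex
 *		if (orig - 1 < 0)		// already locked/contended
 *			return 0;		// movlt: report failure
 *		failed = store_exclusive(orig - 1);	// strexeq
 *	} while (failed);			// bgt 1b: lost exclusivity
 *	return orig;				// 1 means we got the lock
 *
 * Note that fail_fn is not used here: failure is reported through the
 * return value alone.
 */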
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"1: ldrex	%0, [%3]	\n\t"
		"subs	%1, %0, #1	\n\t"
		"strexeq	%2, %1, [%3]	\n\t"
		"movlt	%0, #0	\n\t"
		"cmpeq	%2, #0	\n\t"
		"bgt	1b "

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	if (__orig)
		smp_rmb();

	return __orig;
}
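
/*
 * Hypothetical caller, sketching how the generic mutex core is expected
 * to drive these fastpaths (slowpath names assumed, per kernel/mutex.c
 * of this era):
 *
 *	void mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 */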

#endif	/* __LINUX_ARM_ARCH__ < 6 */
#endif	/* _ASM_MUTEX_H */