/*
 * arch/arm/include/asm/mutex.h
 *
 * ARM optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#if __LINUX_ARM_ARCH__ < 6
/* On pre-ARMv6 hardware the swp-based implementation is the most efficient. */
# include <asm-generic/mutex-xchg.h>
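/*
 * For reference, the xchg-based fast path picked up above boils down to
 * something like the following sketch (an illustration of what
 * asm-generic/mutex-xchg.h does, not a definition of it):
 *
 *	if (unlikely(atomic_xchg(count, 0) != 1))
 *		fail_fn(count);
 *
 * i.e. the counter is unconditionally claimed with a swap, and the slow
 * path is taken whenever the previous value shows the lock wasn't free.
 */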
#else

/*
 * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
 * atomic decrement (it is not a reliable atomic decrement, but it satisfies
 * the defined semantics for our purpose while being smaller and faster
 * than a real atomic decrement or atomic swap). The idea is to attempt
 * decrementing the lock value only once. If, once decremented, it isn't zero,
 * or if its store-back fails due to a dispute on the exclusive store, we
 * simply bail out immediately through the slow path where the lock will be
 * reattempted until it succeeds.
 */
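/*
 * In rough C terms the fast path below behaves like the following
 * sketch, where strex_succeeded() is a hypothetical helper standing in
 * for the result of the exclusive store, not a real kernel interface:
 *
 *	int val = count->counter - 1;		(ldrex + sub)
 *	if (val != 0 || !strex_succeeded(&count->counter, val))
 *		fail_fn(count);			(no retry in the fast path)
 */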
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	/*
	 * Take the slow path if the decremented value is non-zero or if
	 * the exclusive store was disputed; read barrier on success.
	 */
	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);
	else
		smp_rmb();
}

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	/* Same as above, except the slow path's return value is propagated. */
	__res |= __ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);
	else
		smp_rmb();

	return __res;
}

/*
 * The same trick is used for the unlock fast path. However, the original
 * value, rather than the result, is used to test for success in order to
 * get better generated assembly.
 */
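/*
 * A rough C sketch of the fast path below, again with a hypothetical
 * strex_succeeded() standing in for the result of the exclusive store:
 *
 *	int orig = count->counter;		(ldrex)
 *	int ok = strex_succeeded(&count->counter, orig + 1);
 *	if (orig != 0 || !ok)
 *		fail_fn(count);		(contended, or the store was lost)
 *
 * Testing orig, which is already sitting in a register, spares the
 * compiler a dependency on the freshly computed orig + 1.
 */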
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	smp_wmb();
	__asm__ (

		"ldrex	%0, [%3]	\n\t"
		"add	%1, %0, #1	\n\t"
		"strex	%2, %1, [%3]	"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	/* Slow path if the lock was contended or the store was disputed. */
	__orig |= __ex_flag;
	if (unlikely(__orig != 0))
		fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails,
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1

/*
 * For __mutex_fastpath_trylock we use another construct which could be
 * described as a "single value cmpxchg".
 *
 * This provides the needed trylock semantics like cmpxchg would, but it is
 * lighter and less generic than a true cmpxchg implementation.
 */
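/*
 * Semantically the loop below amounts to this sketch, with ldrex() and
 * strex_succeeded() as hypothetical stand-ins for the exclusive
 * load/store pair:
 *
 *	do {
 *		int orig = ldrex(&count->counter);
 *		if (orig != 1)
 *			return 0;	(lock not free: fail, no retry)
 *	} while (!strex_succeeded(&count->counter, 0));
 *	return 1;			(1 -> 0 transition done)
 *
 * Only the 1 -> 0 transition is ever attempted, hence "single value
 * cmpxchg": a true cmpxchg would have to handle arbitrary old/new pairs.
 */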
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		/*
		 * Attempt the 1 -> 0 transition: retry only when the
		 * exclusive store is disputed, and fail straight away
		 * (returning 0) when the count shows the lock isn't free.
		 */
		"1: ldrex	%0, [%3]	\n\t"
		"subs	%1, %0, #1	\n\t"
		"strexeq	%2, %1, [%3]	\n\t"
		"movlt	%0, #0		\n\t"
		"cmpeq	%2, #0		\n\t"
		"bgt	1b		"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	/* Read barrier only when the lock was actually taken. */
	if (__orig)
		smp_rmb();

	return __orig;
}

#endif	/* __LINUX_ARM_ARCH__ < 6 */
#endif	/* _ASM_MUTEX_H */