/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_64_H
#define _ASM_X86_MUTEX_64_H

/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
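/*
 * Two implementations follow.  When the compiler supports "asm goto"
 * (CC_HAVE_ASM_GOTO), the failure branch can jump straight to a C
 * label, so fail_fn() is called as ordinary C code and the compiler
 * takes care of saving registers.  Without it, a fallback macro calls
 * fail_fn directly from the asm and has to spell out the call's side
 * effects by hand (see the clobber lists below).
 */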
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_lock(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
{
	asm_volatile_goto(LOCK_PREFIX "   decl %0\n"
			  "   jns %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
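/*
 * Fallback for compilers without "asm goto": the asm itself calls
 * <fail_fn>, with @v already sitting in %rdi (the first-argument
 * register of the x86-64 SysV ABI, pinned by the "D" constraint).
 * Since the callee may overwrite any caller-saved register, they are
 * all listed as clobbers.  The __mutex_fastpath_unlock() fallback
 * below uses the same layout.
 */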
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"		\
		     "   jns 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *				  from 1 to 0
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		return -1;
	else
		return 0;
}
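
/*
 * Note that, unlike the other fastpath helpers, there is no fail_fn
 * argument here: the generic mutex code branches on the return value
 * and enters its slowpath when this returns -1.
 */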

/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_unlock(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
{
	asm_volatile_goto(LOCK_PREFIX "   incl %0\n"
			  "   jg %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"		\
		     "   jg 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif

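/*
 * Tell the generic code that a failed unlock fastpath leaves the mutex
 * locked: with waiters queued, the incl above can leave the count at 0
 * or below, so the unlock slowpath must set the count back to 1 itself.
 */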
#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	else
		return 0;
}
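
/*
 * Unlike the decrement-based lock fastpath, a failed cmpxchg leaves
 * the count untouched, so nothing needs undoing on failure;
 * atomic_cmpxchg() is a single LOCK CMPXCHG on x86-64.
 */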

#endif /* _ASM_X86_MUTEX_64_H */