/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <soc/qcom/watchdog.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

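/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_DEBUG_SPINLOCK=y the ordinary locking API is routed through the
 * do_raw_*() helpers in this file, so plain driver code such as the
 * hypothetical example below gets the magic/owner/owner_cpu checks on
 * every acquisition and release:
 *
 *	static DEFINE_SPINLOCK(example_lock);	// hypothetical lock name
 *
 *	spin_lock(&example_lock);		// -> do_raw_spin_lock()
 *	... critical section ...
 *	spin_unlock(&example_lock);		// -> do_raw_spin_unlock()
 *
 * Locks set up with DEFINE_SPINLOCK() or spin_lock_init() get their debug
 * fields initialized statically or via __raw_spin_lock_init() above.
 */
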
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		READ_ONCE(lock->owner_cpu));
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
	msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
	BUG();
#endif
	dump_stack();
}

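/*
 * debug_locks_off() turns off all further lock debugging on the first
 * reported problem (and returns 0 if debugging was already disabled), so
 * only the first detected problem is reported and later corruption does
 * not flood the log.
 */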
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

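/*
 * The owner, owner_cpu and magic fields are updated by the lock holder
 * while other CPUs may read them concurrently (e.g. from spin_dump()), so
 * the helpers below use READ_ONCE()/WRITE_ONCE() to avoid load/store
 * tearing on these intentionally racy debug accesses.
 */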
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We now rely on the NMI watchdog to detect lockups instead of doing the
 * detection here with an unfair lock, which can cause problems of its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	arch_spin_lock(&lock->raw_lock);
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

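/*
 * Illustrative misuse (hypothetical driver code, not part of the original
 * file) that the checks above catch:
 *
 *	spin_lock(&dev->lock);		// dev->lock is a hypothetical name
 *	...
 *	spin_unlock(&dev->lock);
 *	spin_unlock(&dev->lock);	// second, erroneous release
 *
 * The second spin_unlock() reaches do_raw_spin_unlock() with the lock no
 * longer held, so debug_spin_unlock() reports "already unlocked" through
 * spin_bug() before the arch-level unlock runs.
 */
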
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
	msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
	BUG();
#endif
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

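/*
 * Illustrative sketch (hypothetical code, not part of the original file):
 * both reader and writer paths land in the do_raw_*() helpers below when
 * CONFIG_DEBUG_SPINLOCK=y:
 *
 *	static DEFINE_RWLOCK(example_rwlock);	// hypothetical lock name
 *
 *	read_lock(&example_rwlock);		// -> do_raw_read_lock()
 *	... readers may run concurrently ...
 *	read_unlock(&example_rwlock);		// -> do_raw_read_unlock()
 *
 *	write_lock(&example_rwlock);		// -> do_raw_write_lock()
 *	... exclusive writer section ...
 *	write_unlock(&example_rwlock);		// -> do_raw_write_unlock()
 */
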
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

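/*
 * Unlike the write side below, the read-side helpers above only validate
 * the magic value: an rwlock can be held by many readers at once, so there
 * is no single owner/owner_cpu to record or check on the read paths.
 */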
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}