/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <soc/qcom/watchdog.h>

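/*
 * How these hooks are reached (illustrative sketch, not part of the
 * upstream file): with CONFIG_DEBUG_SPINLOCK enabled, the generic lock
 * APIs are routed to the do_raw_*() functions below, roughly:
 *
 *	DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	-> raw_spin_lock() -> _raw_spin_lock()
 *				   -> do_raw_spin_lock()   (this file)
 *	spin_unlock(&my_lock);	-> ... -> do_raw_spin_unlock()
 *
 * The debug variants wrap the arch_* primitives with consistency checks
 * on the magic, owner and owner_cpu fields set up by the initializers
 * below.
 */
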
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

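/*
 * Report a suspected spinlock bug: dump the lock state and the current
 * task, then take the configured failure action. On this MSM tree,
 * CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG triggers a watchdog bite (forcing a
 * reset so the failure can be examined from the resulting dump), and
 * CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG escalates to BUG(); otherwise we
 * only dump_stack() and continue.
 */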
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		READ_ONCE(lock->owner_cpu));
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
	msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
	BUG();
#endif
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

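/*
 * Sanity checks wrapped around the real arch_spin_lock()/unlock(): the
 * magic value catches uninitialized or corrupted locks, while the
 * owner/owner_cpu fields catch recursive locking and wrong-owner or
 * wrong-CPU unlocks. The fields are accessed with READ_ONCE() and
 * WRITE_ONCE() because spin_dump() may read them from another CPU,
 * without holding the lock, while they are being updated here.
 */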
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

/*
 * We now rely on the NMI watchdog to detect lockups instead of doing
 * the detection here with an unfair lock, which can cause problems of
 * its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	arch_spin_lock(&lock->raw_lock);
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
	msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
	BUG();
#endif
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

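/*
 * Read locks can legitimately be held by several CPUs at once, so there
 * is no meaningful single owner to record: the read-side hooks only
 * validate the magic value. Owner tracking is done for the exclusive
 * write side further below.
 */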
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

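/*
 * The write side is exclusive, so it mirrors the spinlock checks:
 * recursion and cpu-recursion before taking the lock, wrong-owner and
 * wrong-CPU on release.
 */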
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}