blob: dafaf1de2491aedb0a323aed4951e5084e58e18c [file] [log] [blame]
Ingo Molnarfb1c8f92005-09-10 00:25:56 -07001/*
2 * Copyright 2005, Red Hat, Inc., Ingo Molnar
3 * Released under the General Public License (GPL).
4 *
5 * This file contains the spinlock/rwlock implementations for
6 * DEBUG_SPINLOCK.
7 */
8
Ingo Molnarfb1c8f92005-09-10 00:25:56 -07009#include <linux/spinlock.h>
10#include <linux/interrupt.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070011#include <linux/debug_locks.h>
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070012#include <linux/delay.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070013#include <linux/module.h>
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070014
Ingo Molnar8a25d5d2006-07-03 00:24:54 -070015void __spin_lock_init(spinlock_t *lock, const char *name,
16 struct lock_class_key *key)
17{
18#ifdef CONFIG_DEBUG_LOCK_ALLOC
19 /*
20 * Make sure we are not reinitializing a held lock:
21 */
22 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
23 lockdep_init_map(&lock->dep_map, name, key);
24#endif
25 lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
26 lock->magic = SPINLOCK_MAGIC;
27 lock->owner = SPINLOCK_OWNER_INIT;
28 lock->owner_cpu = -1;
29}
30
31EXPORT_SYMBOL(__spin_lock_init);
32
33void __rwlock_init(rwlock_t *lock, const char *name,
34 struct lock_class_key *key)
35{
36#ifdef CONFIG_DEBUG_LOCK_ALLOC
37 /*
38 * Make sure we are not reinitializing a held lock:
39 */
40 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
41 lockdep_init_map(&lock->dep_map, name, key);
42#endif
43 lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
44 lock->magic = RWLOCK_MAGIC;
45 lock->owner = SPINLOCK_OWNER_INIT;
46 lock->owner_cpu = -1;
47}
48
49EXPORT_SYMBOL(__rwlock_init);
50
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070051static void spin_bug(spinlock_t *lock, const char *msg)
52{
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070053 struct task_struct *owner = NULL;
54
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070055 if (!debug_locks_off())
56 return;
57
58 if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
59 owner = lock->owner;
60 printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
61 msg, raw_smp_processor_id(),
62 current->comm, current->pid);
63 printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
64 ".owner_cpu: %d\n",
65 lock, lock->magic,
66 owner ? owner->comm : "<none>",
67 owner ? owner->pid : -1,
68 lock->owner_cpu);
69 dump_stack();
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070070}
71
/*
 * If @cond holds, report a spinlock debug violation via spin_bug().
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the original bare "if (unlikely(cond)) spin_bug(...)"
 * form would silently capture a following "else" when used inside an
 * unbraced if/else (dangling-else hazard).
 */
#define SPIN_BUG_ON(cond, lock, msg)				\
	do {							\
		if (unlikely(cond))				\
			spin_bug(lock, msg);			\
	} while (0)
73
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070074static inline void
75debug_spin_lock_before(spinlock_t *lock)
Ingo Molnarfb1c8f92005-09-10 00:25:56 -070076{
77 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
78 SPIN_BUG_ON(lock->owner == current, lock, "recursion");
79 SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
80 lock, "cpu recursion");
81}
82
83static inline void debug_spin_lock_after(spinlock_t *lock)
84{
85 lock->owner_cpu = raw_smp_processor_id();
86 lock->owner = current;
87}
88
89static inline void debug_spin_unlock(spinlock_t *lock)
90{
91 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
92 SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
93 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
94 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
95 lock, "wrong CPU");
96 lock->owner = SPINLOCK_OWNER_INIT;
97 lock->owner_cpu = -1;
98}
99
100static void __spin_lock_debug(spinlock_t *lock)
101{
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700102 u64 i;
Chuck Ebbertc22f0082006-09-29 01:59:14 -0700103 u64 loops = loops_per_jiffy * HZ;
104 int print_once = 1;
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700105
106 for (;;) {
Chuck Ebbertc22f0082006-09-29 01:59:14 -0700107 for (i = 0; i < loops; i++) {
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700108 if (__raw_spin_trylock(&lock->raw_lock))
109 return;
Ingo Molnare0a60292006-02-07 12:58:54 -0800110 __delay(1);
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700111 }
112 /* lockup suspected: */
113 if (print_once) {
114 print_once = 0;
Dave Jones51989b92006-01-09 20:51:32 -0800115 printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
116 "%s/%d, %p\n",
Ingo Molnarbb44f112005-12-20 11:54:17 +0100117 raw_smp_processor_id(), current->comm,
118 current->pid, lock);
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700119 dump_stack();
120 }
121 }
122}
123
124void _raw_spin_lock(spinlock_t *lock)
125{
126 debug_spin_lock_before(lock);
127 if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
128 __spin_lock_debug(lock);
129 debug_spin_lock_after(lock);
130}
131
132int _raw_spin_trylock(spinlock_t *lock)
133{
134 int ret = __raw_spin_trylock(&lock->raw_lock);
135
136 if (ret)
137 debug_spin_lock_after(lock);
138#ifndef CONFIG_SMP
139 /*
140 * Must not happen on UP:
141 */
142 SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
143#endif
144 return ret;
145}
146
147void _raw_spin_unlock(spinlock_t *lock)
148{
149 debug_spin_unlock(lock);
150 __raw_spin_unlock(&lock->raw_lock);
151}
152
153static void rwlock_bug(rwlock_t *lock, const char *msg)
154{
Ingo Molnar9a11b49a2006-07-03 00:24:33 -0700155 if (!debug_locks_off())
156 return;
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700157
Ingo Molnar9a11b49a2006-07-03 00:24:33 -0700158 printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
159 msg, raw_smp_processor_id(), current->comm,
160 current->pid, lock);
161 dump_stack();
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700162}
163
/*
 * If @cond holds, report an rwlock debug violation via rwlock_bug().
 * Wrapped in do { } while (0) so the macro is a single statement and
 * cannot steal a following "else" in an unbraced if/else body
 * (dangling-else hazard of the original bare "if" form).
 */
#define RWLOCK_BUG_ON(cond, lock, msg)				\
	do {							\
		if (unlikely(cond))				\
			rwlock_bug(lock, msg);			\
	} while (0)
165
#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
/*
 * Disabled contended read-lock slow path (kept for reference): spin
 * with read-trylock, reporting a suspected lockup once after roughly
 * one second (loops_per_jiffy * HZ iterations) without progress.
 */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 loops_per_round = loops_per_jiffy * HZ;
	u64 n;
	int printed = 0;

	for (;;) {
		for (n = 0; n < loops_per_round; n++) {
			if (__raw_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* Still spinning after ~1s — suspected lockup: */
		if (!printed) {
			printed = 1;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700191
192void _raw_read_lock(rwlock_t *lock)
193{
194 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
Andrew Morton72f0b4e2006-08-05 12:13:47 -0700195 __raw_read_lock(&lock->raw_lock);
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700196}
197
198int _raw_read_trylock(rwlock_t *lock)
199{
200 int ret = __raw_read_trylock(&lock->raw_lock);
201
202#ifndef CONFIG_SMP
203 /*
204 * Must not happen on UP:
205 */
206 RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
207#endif
208 return ret;
209}
210
211void _raw_read_unlock(rwlock_t *lock)
212{
213 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
214 __raw_read_unlock(&lock->raw_lock);
215}
216
217static inline void debug_write_lock_before(rwlock_t *lock)
218{
219 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
220 RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
221 RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
222 lock, "cpu recursion");
223}
224
225static inline void debug_write_lock_after(rwlock_t *lock)
226{
227 lock->owner_cpu = raw_smp_processor_id();
228 lock->owner = current;
229}
230
231static inline void debug_write_unlock(rwlock_t *lock)
232{
233 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
234 RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
235 RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
236 lock, "wrong CPU");
237 lock->owner = SPINLOCK_OWNER_INIT;
238 lock->owner_cpu = -1;
239}
240
#if 0		/* This can cause lockups */
/*
 * Disabled contended write-lock slow path (kept for reference): spin
 * with write-trylock, reporting a suspected lockup once after roughly
 * one second (loops_per_jiffy * HZ iterations) without progress.
 */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 loops_per_round = loops_per_jiffy * HZ;
	u64 n;
	int printed = 0;

	for (;;) {
		for (n = 0; n < loops_per_round; n++) {
			if (__raw_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* Still spinning after ~1s — suspected lockup: */
		if (!printed) {
			printed = 1;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700266
267void _raw_write_lock(rwlock_t *lock)
268{
269 debug_write_lock_before(lock);
Andrew Morton72f0b4e2006-08-05 12:13:47 -0700270 __raw_write_lock(&lock->raw_lock);
Ingo Molnarfb1c8f92005-09-10 00:25:56 -0700271 debug_write_lock_after(lock);
272}
273
274int _raw_write_trylock(rwlock_t *lock)
275{
276 int ret = __raw_write_trylock(&lock->raw_lock);
277
278 if (ret)
279 debug_write_lock_after(lock);
280#ifndef CONFIG_SMP
281 /*
282 * Must not happen on UP:
283 */
284 RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
285#endif
286 return ret;
287}
288
289void _raw_write_unlock(rwlock_t *lock)
290{
291 debug_write_unlock(lock);
292 __raw_write_unlock(&lock->raw_lock);
293}