/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

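/*
 * Lock statistics are collected only when CONFIG_XEN_DEBUG_FS is
 * enabled; the counters below are exported under the "spinlocks"
 * directory of Xen's debugfs tree (see xen_spinlock_debugfs() at the
 * bottom of this file).
 */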
#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
	u64 taken;
	u32 taken_slow;
	u32 taken_slow_nested;
	u32 taken_slow_pickup;
	u32 taken_slow_spurious;
	u32 taken_slow_irqenable;

	u64 released;
	u32 released_slow;
	u32 released_slow_kicked;

#define HISTO_BUCKETS 20
	u32 histo_spin_fast[HISTO_BUCKETS+1];
	u32 histo_spin[HISTO_BUCKETS+1];

	u64 spinning_time;
	u64 total_time;
} spinlock_stats;

static u8 zero_stats;

static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

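/*
 * zero_stats is writable from debugfs; setting it to a non-zero value
 * causes all the counters to be cleared the next time any statistic
 * is updated.
 */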
static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

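/*
 * Account a spin of 'delta' (in xen_clocksource_read() units,
 * i.e. nanoseconds) in a power-of-two histogram: bucket n counts
 * spins with ilog2(delta) == n, and everything at or beyond
 * HISTO_BUCKETS is lumped into the final bucket.
 */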
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_fast(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
	spinlock_stats.spinning_time += delta;
}

static inline void spin_time_accum(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin);
	spinlock_stats.total_time += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_fast(u64 start)
{
}
static inline void spin_time_accum(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

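/*
 * The Xen spinlock is overlaid on the generic raw_spinlock: a single
 * byte holds the lock itself, and a counter tracks how many cpus are
 * waiting for it in the slow (blocking) path.
 */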
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

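/*
 * Attempt to take the lock with a single atomic exchange: write 1 to
 * the lock byte and succeed iff the old value was 0 (free).
 */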
static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

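/*
 * Per-cpu state for the slow path: lock_kicker_irq is the (never
 * actually delivered) event channel irq this cpu blocks on and is
 * kicked through; lock_spinners records which lock this cpu is
 * currently waiting for, if any.
 */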
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the cpu's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	__get_cpu_var(lock_spinners) = prev;
}

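/*
 * Slow path: register interest in the lock, then alternate between
 * re-trying the lock and blocking on this cpu's kicker irq until the
 * releaser sends a kick.  If the caller originally had interrupts
 * enabled, they are re-enabled while we block so that the wait does
 * not add to interrupt latency.
 */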
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	prev = spinning_lock(xl);

	flags = __raw_local_save_flags();
	if (irq_enable) {
		ADD_STATS(taken_slow_irqenable, 1);
		raw_local_irq_enable();
	}

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again to make sure it didn't become free while
		   we weren't looking */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_this_cpu.irqs[irq]++;

out:
	raw_local_irq_restore(flags);
	unspinning_lock(xl, prev);
	return ret;
}

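/*
 * Fast path: spin on the lock byte for up to TIMEOUT iterations using
 * an atomic xchg, pausing (rep;nop) while the lock is held.  Only if
 * the timeout expires do we fall into the blocking slow path above;
 * a TIMEOUT of ~0 disables the slow path and spins indefinitely.
 */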
static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

		asm("1: xchgb %1,%0\n"		/* try to grab the lock */
		    "   testb %1,%1\n"
		    "   jz 3f\n"		/* old value was 0: we got it */
		    "2: rep;nop\n"		/* pause while the lock is held */
		    "   cmpb $0,%0\n"
		    "   je 1b\n"		/* looks free again: retry the xchg */
		    "   dec %2\n"
		    "   jnz 2b\n"		/* keep spinning until timeout */
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_fast(start_spin_fast);

	} while (unlikely(oldval != 0 &&
			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));

	spin_time_accum(start_spin);
}

static void xen_spin_lock(struct raw_spinlock *lock)
{
	__xen_spin_lock(lock, false);
}

static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
{
	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}

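/*
 * Kick one cpu that has registered interest in this lock by sending
 * it the spinlock IPI, which makes its kicker irq pending and so
 * wakes its xen_poll_irq().
 */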
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	ADD_STATS(released_slow, 1);

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			ADD_STATS(released_slow_kicked, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

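/*
 * The kicker irq is never actually delivered as an interrupt (it is
 * disabled immediately after being bound); it exists only as
 * something for waiters to poll and block on.  If this handler ever
 * runs, something has gone badly wrong.
 */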
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

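/*
 * Install the Xen implementations in pv_lock_ops, so that the
 * paravirtualized spinlock operations are routed through the
 * functions above.
 */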
void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}

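/*
 * Expose the statistics and tunables above through debugfs, under
 * the "spinlocks" directory of the Xen debugfs tree (typically
 * /sys/kernel/debug/xen/spinlocks/).  "zero_stats" and "timeout" are
 * writable; everything else is read-only.
 */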
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);
	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_irqenable);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.spinning_time);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.total_time);

	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif /* CONFIG_XEN_DEBUG_FS */