/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

#ifdef CONFIG_XEN_DEBUG_FS
static struct xen_spinlock_stats
{
	u64 taken;
	u32 taken_slow;
	u32 taken_slow_nested;
	u32 taken_slow_pickup;
	u32 taken_slow_spurious;
	u32 taken_slow_irqenable;

	u64 released;
	u32 released_slow;
	u32 released_slow_kicked;

#define HISTO_BUCKETS	30
	u32 histo_spin_total[HISTO_BUCKETS+1];
	u32 histo_spin_spinning[HISTO_BUCKETS+1];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];

	u64 time_total;
	u64 time_spinning;
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

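/*
 * Number of fast-path spin iterations attempted before falling back to
 * the blocking slow path; tunable at runtime through the "timeout"
 * debugfs node registered below.
 */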
static unsigned lock_timeout = 1 << 10;
#define TIMEOUT lock_timeout

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); spinlock_stats.elem += (val); } while(0)

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

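/*
 * Accumulate a time delta into a power-of-two histogram: the bucket index
 * is ilog2(delta), and anything at or beyond HISTO_BUCKETS is collapsed
 * into the final overflow bucket.
 */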
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_spinning(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
	spinlock_stats.time_spinning += delta;
}

static inline void spin_time_accum_total(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_total);
	spinlock_stats.time_total += delta;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT			(1 << 10)
#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_total(u64 start)
{
}
static inline void spin_time_accum_spinning(u64 start)
{
}
static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

/*
 * Size struct xen_spinlock so it's the same as arch_spinlock_t.
 */
#if NR_CPUS < 256
typedef u8 xen_spinners_t;
# define inc_spinners(xl) \
	asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
# define dec_spinners(xl) \
	asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
#else
typedef u16 xen_spinners_t;
# define inc_spinners(xl) \
	asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
# define dec_spinners(xl) \
	asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
#endif

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	xen_spinners_t spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

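/*
 * Try to take the lock with a single atomic byte exchange: write 1 into
 * the lock byte and succeed only if the value read back was 0.
 */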
static int xen_spin_trylock(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

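/*
 * Per-cpu slow-path state: the event-channel irq used to kick a blocked
 * waiter, and the lock this cpu is currently spinning on (so an unlocker
 * knows whom to kick).
 */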
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __this_cpu_read(lock_spinners);
	__this_cpu_write(lock_spinners, xl);

	wmb();			/* set lock of interest before count */

	inc_spinners(xl);

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	dec_spinners(xl);
	wmb();			/* decrement count before restoring lock */
	__this_cpu_write(lock_spinners, prev);
}

static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __this_cpu_read(lock_kicker_irq);
	int ret;
	u64 start;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	start = spin_time_start();

	/* announce we're spinning */
	prev = spinning_lock(xl);

	ADD_STATS(taken_slow, 1);
	ADD_STATS(taken_slow_nested, prev != NULL);

	do {
		unsigned long flags;

		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again make sure it didn't become free while
		   we weren't looking */
		ret = xen_spin_trylock(lock);
		if (ret) {
			ADD_STATS(taken_slow_pickup, 1);

			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		flags = arch_local_save_flags();
		if (irq_enable) {
			ADD_STATS(taken_slow_irqenable, 1);
			raw_local_irq_enable();
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);

		raw_local_irq_restore(flags);

		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));

out:
	unspinning_lock(xl, prev);
	spin_time_accum_blocked(start);

	return ret;
}

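/*
 * Common lock entry point: spin on the lock byte for up to TIMEOUT
 * iterations, then drop into xen_spin_lock_slow(), which blocks on the
 * per-cpu kicker irq instead of burning cycles.
 */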
static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	unsigned timeout;
	u8 oldval;
	u64 start_spin;

	ADD_STATS(taken, 1);

	start_spin = spin_time_start();

	do {
		u64 start_spin_fast = spin_time_start();

		timeout = TIMEOUT;

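		/*
		 * Fast path: atomically xchg 1 into the lock byte; if the
		 * old value was 0 we own the lock.  Otherwise spin with
		 * rep;nop, retrying the xchg whenever the byte reads 0,
		 * until the timeout counter runs out.
		 */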
		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

		spin_time_accum_spinning(start_spin_fast);

	} while (unlikely(oldval != 0 &&
			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));

	spin_time_accum_total(start_spin);
}

static void xen_spin_lock(struct arch_spinlock *lock)
{
	__xen_spin_lock(lock, false);
}

static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}

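/*
 * Slow-path release: scan each online cpu's lock_spinners entry and send
 * the unlock IPI to any cpu found waiting on this lock, waking it from
 * xen_poll_irq().
 */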
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	ADD_STATS(released_slow, 1);

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			ADD_STATS(released_slow_kicked, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
		}
	}
}

static void xen_spin_unlock(struct arch_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	ADD_STATS(released, 1);

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/*
	 * Make sure unlock happens before checking for waiting
	 * spinners.  We need a strong barrier to enforce the
	 * write-read ordering to different memory locations, as the
	 * CPU makes no implied guarantees about their ordering.
	 */
	mb();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

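/*
 * The kicker irq is never actually delivered (it is kept disabled and
 * only polled with xen_poll_irq()), so this handler should never run.
 */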
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

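/*
 * Bind the per-cpu spinlock kicker event channel for this cpu and record
 * its irq in lock_kicker_irq for use by the slow path.
 */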
void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	/*
	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
	 * (xen: disable PV spinlocks on HVM)
	 */
	if (xen_hvm_domain())
		return;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	/*
	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
	 * (xen: disable PV spinlocks on HVM)
	 */
	if (xen_hvm_domain())
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
}

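/*
 * Point the paravirt lock operations at the Xen implementations.  Only
 * done for PV domains; HVM domains bail out early (see the commit
 * referenced below) and keep the native spinlocks.
 */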
void __init xen_init_spinlocks(void)
{
	/*
	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
	 * (xen: disable PV spinlocks on HVM)
	 */
	if (xen_hvm_domain())
		return;

	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));

	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);

	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow);
	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_nested);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_pickup);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_spurious);
	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
			   &spinlock_stats.taken_slow_irqenable);

	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.released_slow);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.released_slow_kicked);

	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
			   &spinlock_stats.time_spinning);
	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);
	debugfs_create_u64("time_total", 0444, d_spin_debug,
			   &spinlock_stats.time_total);

	debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
	debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */