/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

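/*
 * Wake the vCPU that is blocked in xen_qlock_wait() by sending it the
 * dedicated unlock IPI; the pending event makes its xen_poll_irq() return.
 */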
static void xen_qlock_kick(int cpu)
{
	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	/* clear pending */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing pending IRQ to make sure
	 * that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
	 * So it is effectively a memory barrier for x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
}

#else /* CONFIG_QUEUED_SPINLOCKS */

enum xen_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	TAKEN_SLOW_SPURIOUS,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};


#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

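/*
 * Writing a non-zero value to the "zero_stats" debugfs file requests a
 * reset; the cmpxchg() below ensures only one updater clears the counters.
 */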
static inline void check_zero(void)
{
	u8 ret;
	u8 old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

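/* Accumulate one blocked-time sample into log2-spaced histogram buckets. */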
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

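/*
 * Per-cpu record of the lock and ticket a CPU is currently blocked on,
 * consulted by xen_unlock_kick() to decide which CPU to wake.
 */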
struct xen_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;

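/*
 * Ticket-lock slow path: publish the (lock, want) pair for this CPU and
 * block in xen_poll_irq() until the unlocker kicks us (or a spurious
 * wakeup lets us re-check the ticket head).
 */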
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	__ticket_t head;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);
	/*
	 * We don't really care if we're overwriting some other
	 * (lock,want) pair, as that would mean that we're currently
	 * in an interrupt context, and the outer context had
	 * interrupts enabled.  That has already kicked the VCPU out
	 * of xen_poll_irq(), so it will just return spuriously and
	 * retry with newly setup (lock,want).
	 *
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* This uses set_bit, which is atomic and therefore a barrier */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* Only check lock once pending cleared */
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	kstat_incr_irq_this_cpu(irq);
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);

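/*
 * Scan the CPUs currently waiting in the slow path and kick the one whose
 * recorded ticket matches "next" by sending it the unlock IPI.
 */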
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}
#endif /* CONFIG_QUEUED_SPINLOCKS */

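/*
 * The kick IPI is only ever consumed by polling (xen_poll_irq()) while the
 * irq itself stays disabled, so this handler should never actually run.
 */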
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

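/*
 * Bind the per-cpu spinlock kick event channel. The irq is left disabled;
 * waiters consume the event by polling rather than by interrupt delivery.
 */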
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

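/* Tear down the per-cpu kicker irq again (e.g. when a CPU is unplugged). */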
void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}


/*
 * Our init of PV spinlocks is split into two init functions because we use
 * both paravirt patching and jump-label patching, and all of it has to be
 * done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
#ifdef CONFIG_QUEUED_SPINLOCKS
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
#else
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
#endif
}

/*
 * The jump_label init code needs to happen _after_ jump labels are enabled
 * and before SMP is started. Hence we use a pre-SMP initcall level for the
 * init. We cannot do it in xen_init_spinlocks as that is done before jump
 * labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(xen_init_spinlocks_jump);

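/* "xen_nopvspin" on the kernel command line disables PV spinlocks. */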
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);

#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)

static struct dentry *d_spin_debug;

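/*
 * Expose the contention counters and blocked-time histogram under
 * <debugfs>/xen/spinlocks/.
 */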
static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	if (!xen_pvspin)
		return 0;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */