/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an ftrace-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};
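
/*
 * The functions below cast the generic raw_spinlock pointer to
 * struct xen_spinlock, so this layout must fit within (and alias)
 * the raw_spinlock lock word; the exact overlap with raw_spinlock_t
 * is an assumption of this note rather than something this file
 * asserts.
 */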

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

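	/*
	 * Atomically exchange 1 into the lock byte; "old" gets the
	 * previous value.  If it was 0 the lock was free and is now
	 * ours; if it was 1 someone else already holds it.
	 */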
	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

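/*
 * Per-cpu state for the slow path: lock_kicker_irq is the event
 * channel irq this cpu blocks on (and gets kicked through), and
 * lock_spinners records which lock, if any, this cpu is currently
 * waiting for, so an unlocker can find and wake it.
 */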
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

/*
 * Mark a cpu as interested in a lock.  Returns the CPU's previous
 * lock of interest, in case we got preempted by an interrupt.
 */
static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
	struct xen_spinlock *prev;

	prev = __get_cpu_var(lock_spinners);
	__get_cpu_var(lock_spinners) = xl;

	wmb();			/* set lock of interest before count */

	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");

	return prev;
}

/*
 * Mark a cpu as no longer interested in a lock.  Restores previous
 * lock of interest (NULL for none).
 */
static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before restoring lock */
	__get_cpu_var(lock_spinners) = prev;
}
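
/*
 * The ordering in the two helpers above pairs with xen_spin_unlock():
 * lock_spinners must be visible before xl->spinners is raised, so
 * that once an unlocker sees a nonzero spinner count it can also
 * find the waiting cpu by scanning lock_spinners (and symmetrically
 * on the way out).
 */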

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	struct xen_spinlock *prev;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	prev = spinning_lock(xl);

	do {
		/* clear pending */
		xen_clear_irq_pending(irq);

		/* check again to make sure it didn't become free while
		   we weren't looking */
		ret = xen_spin_trylock(lock);
		if (ret) {
			/*
			 * If we interrupted another spinlock while it
			 * was blocking, make sure it doesn't block
			 * without rechecking the lock.
			 */
			if (prev != NULL)
				xen_set_irq_pending(irq);
			goto out;
		}

		/*
		 * Block until irq becomes pending.  If we're
		 * interrupted at this point (after the trylock but
		 * before entering the block), then the nested lock
		 * handler guarantees that the irq will be left
		 * pending if there's any chance the lock became free;
		 * xen_poll_irq() returns immediately if the irq is
		 * pending.
		 */
		xen_poll_irq(irq);
	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */

	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl, prev);
	return ret;
}

static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int timeout;
	u8 oldval;

	do {
		timeout = 1 << 10;

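		/*
		 * Fast path: try to grab the lock with xchg, and if
		 * that fails spin reading it (with rep;nop to be
		 * hypervisor/SMT friendly) until it looks free or the
		 * timeout expires, retrying the xchg whenever the
		 * lock reads as free:
		 *   1: xchg lock byte with 1; got 0 -> acquired (3)
		 *   2: pause; reread; free -> retry 1; otherwise
		 *      count down the timeout and keep spinning
		 */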
141 asm("1: xchgb %1,%0\n"
142 " testb %1,%1\n"
143 " jz 3f\n"
144 "2: rep;nop\n"
145 " cmpb $0,%0\n"
146 " je 1b\n"
147 " dec %2\n"
148 " jnz 2b\n"
149 "3:\n"
150 : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
151 : "1" (1)
152 : "memory");
153
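	/*
	 * oldval != 0 means the fast loop timed out while still
	 * contended; fall into the slow path, and retry the whole
	 * sequence unless xen_spin_lock_slow() managed to take the
	 * lock itself (it returns 0 both when the kicker irq isn't
	 * set up yet and when it was woken without getting the lock).
	 */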
	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

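	/* kick one waiter out of its poll, if anyone announced interest */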
	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

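/*
 * The kicker irq exists only so that a blocked cpu has an event
 * channel to poll on; it is disabled right after binding below, so
 * this handler should never actually run.
 */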
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}