/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"

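/*
 * Byte-lock layout overlaid on the generic raw_spinlock slot: the low
 * byte is the lock itself (0 -> free, 1 -> held), and 'spinners'
 * counts the cpus that have entered the slow path for this lock, so
 * the unlocker knows whether anyone needs kicking.
 */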
struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/*
	 * Not strictly true; this is only the count of contended
	 * lock-takers entering the slow path.
	 */
	return xl->spinners != 0;
}

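/*
 * Try to take the lock with a single xchg (implicitly locked on x86):
 * write 1 into the lock byte and succeed only if the old value was 0,
 * i.e. the lock was free and we just took it.
 */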
static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

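/*
 * Per-cpu state for the slow path: 'lock_kicker_irq' is the irq bound
 * to this cpu's XEN_SPIN_UNLOCK_VECTOR IPI (used only as an event
 * channel to poll on and kick), and 'lock_spinners' records which
 * lock, if any, this cpu is currently blocked on, so that
 * xen_spin_unlock_slow() can find a cpu to wake.
 */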
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

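/*
 * Mark/unmark this cpu as spinning on @xl.  The ordering matters: the
 * lock of interest must be visible before the spinner count is
 * raised, and the count must drop before the pointer is cleared, so
 * the unlocker never sees a raised count without a valid pointer.
 */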
static inline void spinning_lock(struct xen_spinlock *xl)
{
	__get_cpu_var(lock_spinners) = xl;
	wmb();			/* set lock of interest before count */
	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");
}

static inline void unspinning_lock(struct xen_spinlock *xl)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before clearing lock */
	__get_cpu_var(lock_spinners) = NULL;
}

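/*
 * Slow path: instead of burning cpu, block in the hypervisor until
 * the unlocker sends our per-cpu kicker IPI.  The pending event is
 * cleared first and the lock retried once more, so a release that
 * happens between the spin loop and the poll can't be missed.
 */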
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	spinning_lock(xl);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	ret = xen_spin_trylock(lock);
	if (ret)
		goto out;

	/* block until irq becomes pending */
	xen_poll_irq(irq);
	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl);
	return ret;
}

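/*
 * Fast path: spin on the lock byte with xchg for up to 2^10
 * iterations (with rep;nop between attempts).  On timeout, fall into
 * xen_spin_lock_slow(); if that returns without the lock, start the
 * whole sequence again.
 */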
static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int timeout;
	u8 oldval;

	do {
		timeout = 1 << 10;

		asm("1: xchgb %1,%0\n"	/* try to grab the lock */
		    " testb %1,%1\n"
		    " jz 3f\n"		/* got it: done */
		    "2: rep;nop\n"	/* pause while we wait */
		    " cmpb $0,%0\n"
		    " je 1b\n"		/* looks free: try to grab it again */
		    " dec %2\n"
		    " jnz 2b\n"		/* keep watching until timeout */
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}

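/*
 * Kick one cpu that is blocked in xen_spin_lock_slow() waiting for
 * this lock, by sending it the spinlock IPI it is polling on.
 */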
static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

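/*
 * Release the lock byte with a plain store, then kick a waiter if the
 * spinner count says anyone is blocked.  The store must not be
 * reordered after the spinners check, hence the compiler barrier.
 */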
static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

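/*
 * The kicker irq is never actually delivered (it is disabled as soon
 * as it is bound); it exists purely as an event channel to poll on,
 * so its handler should never run.
 */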
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

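/*
 * Bind each cpu's spinlock kicker IPI at cpu bring-up.  The
 * kasprintf'd name is handed to the irq core, which keeps the
 * pointer, so it must stay allocated for as long as the irq is bound.
 */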
void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk(KERN_DEBUG "cpu %d spinlock event irq %d\n", cpu, irq);
}

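/*
 * Point the paravirt spinlock ops at the Xen implementations; from
 * here on the generic spinlock entry points dispatch to the functions
 * above.
 */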
void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}