/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

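/*
 * Per-CPU "kicker" state: an event-channel IRQ (kept disabled and only
 * ever polled) through which one CPU wakes a vCPU blocked in
 * xen_qlock_wait(), plus the allocated name it was bound with.
 */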
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

#include <asm/qspinlock.h>

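/*
 * pv "kick" hook: wake the vCPU blocked on @cpu's kicker IRQ. The IPI
 * marks that IRQ pending, so the target's xen_poll_irq() in
 * xen_qlock_wait() returns.
 */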
static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * pv "wait" hook: halt the current CPU and release it back to the host
 * until the kicker IRQ becomes pending (or the lock byte has already
 * changed, in which case there is nothing to wait for).
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If the kicker interrupt is not initialized yet, just spin. */
	if (irq == -1)
		return;

	/* Clear any kick left pending from an earlier wait. */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing the pending IRQ to make
	 * sure that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic,
	 * so it is effectively a memory barrier on x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until the irq becomes pending (or a spurious wakeup). */
	xen_poll_irq(irq);
}

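/*
 * A minimal sketch of how the generic pv-qspinlock slow path drives the
 * two hooks above. Illustrative only: it loosely follows the wait loops
 * in kernel/locking/qspinlock_paravirt.h, where pv_wait()/pv_kick() are
 * the paravirt wrappers that end up in xen_qlock_wait()/xen_qlock_kick():
 *
 *	for (;;) {
 *		int loop;
 *
 *		for (loop = SPIN_THRESHOLD; loop; loop--) {
 *			if (READ_ONCE(*byte) != val)
 *				return;		<- state changed, stop waiting
 *			cpu_relax();
 *		}
 *		pv_wait(byte, val);		<- blocks in xen_qlock_wait()
 *	}
 *
 * The unlocker stores the new byte value before calling pv_kick(cpu), so
 * the re-check of *byte in xen_qlock_wait() after clearing the pending
 * bit either observes the store or the subsequent poll sees the kick.
 */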
/*
 * The kicker IRQ is only ever polled, never delivered: it is disabled
 * right after binding in xen_init_lock_cpu(), so this handler actually
 * running would be a bug.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0,
	     "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	} else {
		kfree(name); /* binding failed; don't leak the name */
	}

	printk(KERN_DEBUG "cpu %d spinlock event irq %d\n", cpu, irq);
}

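/*
 * Undo xen_init_lock_cpu() when a CPU is torn down: unbind the kicker
 * event channel and free the name it was registered under.
 */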
void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}

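/*
 * Generate the callee-save thunk for xen_vcpu_stolen() so it can be
 * installed below via PV_CALLEE_SAVE() as the vcpu_is_preempted op.
 */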
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * The init of PV spinlocks is split into two functions because we use
 * both paravirt patching and jump-label patching, and all of that has
 * to happen before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

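/* "xen_nopvspin" on the kernel command line disables PV spinlocks. */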
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);