/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
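
/*
 * Note: the owner of X86_PLATFORM_IPI_VECTOR (historically the SGI UV
 * platform code) installs its handler by assigning this pointer; the
 * smp_x86_platform_ipi() entry point below then invokes it for each
 * such IPI.
 */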

/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

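/* Per-CPU interrupt statistics; the counters live in irq_cpustat_t (<asm/hardirq.h>). */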
#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
#endif
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
        seq_printf(p, "%*s: ", prec, "DFR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
        seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
                seq_printf(p, "%*s: ", prec, "HYP");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_callback_count);
                seq_puts(p, "  Hypervisor callback interrupts\n");
        }
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
        seq_printf(p, "%*s: ", prec, "PIN");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
        seq_puts(p, "  Posted-interrupt notification event\n");

        seq_printf(p, "%*s: ", prec, "PIW");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_wakeup_ipis);
        seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
        return 0;
}
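
/*
 * Sample of the rows emitted above in /proc/interrupts on a two-CPU
 * machine (the counts are illustrative only):
 *
 *      NMI:          0          0   Non-maskable interrupts
 *      LOC:    2384626    1415926   Local timer interrupts
 *      RES:       1024        512   Rescheduling interrupts
 *      ERR:          0
 */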

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
#endif
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);
        return sum;
}
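
/*
 * fs/proc/stat.c folds these in when composing the "intr" line of
 * /proc/stat: arch_irq_stat() is added once, arch_irq_stat_cpu() once
 * per possible cpu.
 */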

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
        /*
         * The entry stubs store ~vector in orig_ax; complement it to
         * recover the vector number (the high bit of the stored value
         * is used in the ret_from_ code).
         */
        unsigned vector = ~regs->orig_ax;

        /*
         * NB: Unlike exception entries, IRQ entries do not reliably
         * handle context tracking in the low-level entry code. This is
         * because syscall entries execute briefly with IRQs on before
         * updating context tracking state, so we can take an IRQ from
         * kernel mode with CONTEXT_USER. The low-level entry code only
         * updates the context if we came from user mode, so we won't
         * switch to CONTEXT_KERNEL. We'll fix that once the syscall
         * code is cleaned up enough that we can cleanly defer enabling
         * IRQs.
         */

        entering_irq();

        /* entering_irq() tells RCU that we're not quiescent. Check it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

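        /*
         * vector_irq is this CPU's vector-to-irq_desc table; besides
         * valid descriptor pointers it can hold the VECTOR_UNUSED and
         * VECTOR_RETRIGGERED sentinels (see the error path below).
         */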
        desc = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(desc, regs)) {
                ack_APIC_irq();

                if (desc != VECTOR_RETRIGGERED) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
                } else {
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
        }

        exiting_irq();

        set_irq_regs(old_regs);
        return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
        inc_irq_stat(x86_platform_ipis);

        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
}

__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        __smp_x86_platform_ipi();
        exiting_irq();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
        if (handler)
                kvm_posted_intr_wakeup_handler = handler;
        else
                kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
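
/*
 * Illustrative use (this lives in KVM, not here): the VMX code installs
 * its wakeup handler once posted-interrupt support is set up, roughly
 *
 *      kvm_set_posted_intr_wakeup_handler(wakeup_handler);
 *
 * and passes NULL on teardown, which restores dummy_handler().
 */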

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_wakeup_ipis);
        kvm_posted_intr_wakeup_handler();
        exiting_irq();
        set_irq_regs(old_regs);
}
#endif

__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        __smp_x86_platform_ipi();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        exiting_irq();
        set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine(). Putting them on the stack
 * results in a stack frame overflow. Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus. Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
        unsigned int this_cpu, vector, this_count, count;
        struct irq_desc *desc;
        struct irq_data *data;
        int cpu;

        this_cpu = smp_processor_id();
        cpumask_copy(&online_new, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, &online_new);

        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                desc = __this_cpu_read(vector_irq[vector]);
                if (IS_ERR_OR_NULL(desc))
                        continue;
                /*
                 * Protect against concurrent action removal, affinity
                 * changes etc.
                 */
                raw_spin_lock(&desc->lock);
                data = irq_desc_get_irq_data(desc);
                cpumask_copy(&affinity_new,
                             irq_data_get_affinity_mask(data));
                cpumask_clear_cpu(this_cpu, &affinity_new);

                /* Do not count inactive or per-cpu irqs. */
                if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                raw_spin_unlock(&desc->lock);
                /*
                 * A single irq may be mapped to multiple cpus'
                 * vector_irq[] (for example IOAPIC cluster mode). In
                 * this case we have two possibilities:
                 *
                 * 1) the resulting affinity mask is empty; that is,
                 * the down'd cpu is the last cpu in the irq's
                 * affinity mask, or
                 *
                 * 2) the resulting affinity mask is no longer a
                 * subset of the online cpus but the affinity mask is
                 * not zero; that is, the down'd cpu is the last online
                 * cpu in a user set affinity mask.
                 */
                if (cpumask_empty(&affinity_new) ||
                    !cpumask_subset(&affinity_new, &online_new))
                        this_count++;
        }

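        /* Now count how many vectors are still free on the surviving CPUs. */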
        count = 0;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We scan from FIRST_EXTERNAL_VECTOR to first system
                 * vector. If the vector is marked in the used vectors
                 * bitmap or an irq is assigned to it, we don't count
                 * it as available.
                 *
                 * As this is an inaccurate snapshot anyway, we can do
                 * this w/o holding vector_lock.
                 */
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < first_system_vector; vector++) {
                        if (!test_bit(vector, used_vectors) &&
                            IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
                                count++;
                }
        }

        if (count < this_count) {
                pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
                        this_cpu, this_count, count);
                return -ERANGE;
        }
        return 0;
}
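/* Called from native_cpu_disable() before this CPU drops out of cpu_online_mask. */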

/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq, vector;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;
        int ret;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
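                /* IRQ 2 is the PIC cascade; it cannot be migrated. */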
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = irq_data_get_affinity_mask(data);
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                /*
                 * Complete the irq move. This cpu is going down and for
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
                irq_force_complete_move(desc);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_online_mask;
                }

                chip = irq_data_get_irq_chip(data);
                /*
                 * The interrupt descriptor might have been cleaned up
                 * already, but it is not yet removed from the radix tree
                 */
                if (!chip) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity) {
                        ret = chip->irq_set_affinity(data, affinity, true);
                        if (ret == -ENOSPC)
                                pr_crit("IRQ %d set affinity failed because there are no available vectors. The device assigned to this IRQ is unstable.\n", irq);
                } else {
                        if (!(warned++))
                                set_affinity = 0;
                }

                /*
                 * We unmask if the irq was not marked masked by the
                 * core code. That respects the lazy irq disable
                 * behaviour.
                 */
                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_masked(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        pr_notice("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        pr_notice("Cannot set affinity for irq %i\n", irq);
        }

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu. While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        /*
         * We can walk the vector array of this cpu without holding
         * vector_lock because the cpu is already marked !online, so
         * nothing else will touch it.
         */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;

                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;

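                /*
                 * IRR is 8 x 32-bit APIC registers spaced 0x10 apart;
                 * test this vector's pending bit.
                 */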
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        desc = __this_cpu_read(vector_irq[vector]);

                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }
}
#endif