Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Brian Gerst | 22da7b3 | 2009-01-23 11:03:31 +0900 | [diff] [blame] | 2 | #ifndef _ASM_X86_HARDIRQ_H |
| 3 | #define _ASM_X86_HARDIRQ_H |
| 4 | |
| 5 | #include <linux/threads.h> |
Brian Gerst | 22da7b3 | 2009-01-23 11:03:31 +0900 | [diff] [blame] | 6 | |
/*
 * Per-CPU interrupt statistics for x86.
 *
 * One instance exists per CPU (see the irq_stat per-CPU declaration
 * below in this header).  Fields are only compiled in when the
 * corresponding subsystem is configured, so the struct layout varies
 * with the kernel config.  ____cacheline_aligned keeps each CPU's copy
 * on its own cache line to avoid false sharing between CPUs.
 */
typedef struct {
	u16 __softirq_pending;			/* bitmask of pending softirqs */
#if IS_ENABLED(CONFIG_KVM_INTEL)
	/*
	 * Flag manipulated by the kvm_*_cpu_l1tf_flush_l1d() helpers
	 * later in this header.  NOTE(review): presumably tells KVM to
	 * flush the L1D cache before VM entry (L1TF mitigation) —
	 * confirm against the KVM/VMX entry code.
	 */
	u8 kvm_cpu_l1tf_flush_l1d;
#endif
	unsigned int __nmi_count;	/* arch dependent */
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;	/* APIC ICR read retries */
#endif
#ifdef CONFIG_HAVE_KVM
	/* KVM posted-interrupt IPI counters (normal/wakeup/nested) */
	unsigned int kvm_posted_intr_ipis;
	unsigned int kvm_posted_intr_wakeup_ipis;
	unsigned int kvm_posted_intr_nested_ipis;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;		/* perf events (PMU) interrupts */
	unsigned int apic_irq_work_irqs;	/* irq_work self-IPIs */
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;	/* reschedule IPIs */
	unsigned int irq_call_count;	/* function-call IPIs */
#endif
	unsigned int irq_tlb_count;		/* TLB shootdowns */
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;	/* thermal event interrupts */
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;	/* MCE threshold interrupts */
#endif
#ifdef CONFIG_X86_MCE_AMD
	unsigned int irq_deferred_error_count;	/* AMD deferred-error interrupts */
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	unsigned int irq_hv_callback_count;	/* hypervisor callback interrupts */
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	unsigned int irq_hv_reenlightenment_count;
	unsigned int hyperv_stimer0_count;	/* Hyper-V synthetic timer 0 */
#endif
} ____cacheline_aligned irq_cpustat_t;
| 48 | |
/* The per-CPU statistics instance; one cacheline-aligned copy per CPU. */
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* Tell the generic hardirq code that x86 provides its own irq_stat. */
#define __ARCH_IRQ_STAT

/* Increment one irq_stat counter on the current CPU. */
#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)

extern void ack_bad_irq(unsigned int irq);

/*
 * Per-CPU and system-wide totals of the arch-specific counters above.
 * The self-referential #defines let generic code detect with #ifdef
 * that the architecture overrides these hooks.
 * NOTE(review): presumably consumed by the /proc interrupt/stat code —
 * confirm against fs/proc.
 */
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
Brian Gerst | 22da7b3 | 2009-01-23 11:03:31 +0900 | [diff] [blame] | 62 | |

#if IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * Accessors for the per-CPU kvm_cpu_l1tf_flush_l1d flag declared in
 * irq_cpustat_t above.  The __this_cpu_* forms skip the preemption
 * checks of the this_cpu_* variants.
 * NOTE(review): semantics appear to be "request an L1D flush before
 * the next VM entry on this CPU" (L1TF mitigation) — confirm against
 * the KVM/VMX callers.
 */
static inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
/* Without KVM_INTEL the flag doesn't exist; setting it is a no-op. */
static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
| 82 | |
Brian Gerst | 22da7b3 | 2009-01-23 11:03:31 +0900 | [diff] [blame] | 83 | #endif /* _ASM_X86_HARDIRQ_H */ |