#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

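/*
 * Per-CPU interrupt statistics.  Each CPU gets its own cacheline-aligned
 * copy, so counter updates from hot interrupt paths never bounce a
 * cacheline between CPUs.
 */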
typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq0_irqs;
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
	unsigned int irq_thermal_count;
	unsigned int irq_spurious_count;
	unsigned int irq_threshold_count;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

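/*
 * Interrupt handlers bump their counter with inc_irq_stat(); e.g. the
 * local APIC timer path can do "inc_irq_stat(apic_timer_irqs);".
 * percpu_add() compiles down to a single segment-prefixed add on the
 * current CPU's copy, so no locking or preempt disabling is needed.
 */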
#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)

#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)

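/*
 * __ARCH_SET_SOFTIRQ_PENDING (with __ARCH_IRQ_STAT above) tells the
 * generic softirq code to use these per-CPU accessors instead of the
 * default irq_stat[] array indexed by smp_processor_id().
 */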
#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

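/*
 * Summed into the interrupt totals reported by /proc/stat; defining the
 * names as macros lets the generic code detect that the arch provides
 * its own implementations.
 */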
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */