/*
 * include/asm-i386/irqflags.h
 *
 * IRQ flags handling
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() functions from the lowlevel headers.
 */
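
/*
 * For illustration only (a sketch, not part of this header): the generic
 * include/linux/irqflags.h layer turns the raw_*() primitives provided
 * here into the usual local_irq_*() APIs, roughly along these lines when
 * CONFIG_TRACE_IRQFLAGS is enabled (see that header for the real
 * definitions):
 *
 *	#define local_irq_save(flags)			\
 *		do {					\
 *			raw_local_irq_save(flags);	\
 *			trace_hardirqs_off();		\
 *		} while (0)
 */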
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/* Read the EFLAGS register via pushfl/popfl. */
static inline unsigned long native_save_fl(void)
{
	unsigned long f;
	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
	return f;
}

/* Write EFLAGS (and thus the IF bit) via pushl/popfl. */
static inline void native_restore_fl(unsigned long f)
{
	asm volatile("pushl %0 ; popfl": /* no output */
		     :"g" (f)
		     :"memory", "cc");
}

/* Clear the IF flag: disable maskable interrupts on this CPU. */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

/* Set the IF flag: enable maskable interrupts on this CPU. */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

/*
 * Enable interrupts and halt. "sti" only takes effect after the next
 * instruction, so no interrupt can slip in between the "sti" and the
 * "hlt" and leave the CPU halted with the wakeup lost.
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

/* Halt without touching the interrupt flag. */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop: "sti" only takes effect after the next
 * instruction completes, so the "hlt" that follows cannot be
 * interrupted before it executes and no wakeup can be missed:
 */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}

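/*
 * Illustrative idle-loop usage (a simplified sketch, not the actual
 * default_idle() implementation; safe_halt() and need_resched() come
 * from the generic kernel headers):
 *
 *	local_irq_disable();
 *	if (!need_resched())
 *		safe_halt();		(sti; hlt - the next IRQ wakes us up)
 *	else
 *		local_irq_enable();
 */
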
/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc.: save the current flags and disable interrupts:
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
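
/*
 * Illustrative use of the raw flags API (a sketch; ordinary kernel code
 * should use the local_irq_save()/local_irq_restore() wrappers instead):
 *
 *	unsigned long flags;
 *
 *	raw_local_irq_save(flags);
 *	... critical section, no interrupts on this CPU ...
 *	raw_local_irq_restore(flags);
 */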

#else
#define DISABLE_INTERRUPTS(clobbers)	cli
#define ENABLE_INTERRUPTS(clobbers)	sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)

/* Interrupts are disabled iff the IF flag is clear in the saved EFLAGS. */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}

/*
 * Make the traced hardirq state match the actual machine state.
 *
 * This should be a rarely used function, only for places where it is
 * otherwise impossible to know the irq state, such as in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
	if (raw_irqs_disabled_flags(flags))
		trace_hardirqs_off();
	else
		trace_hardirqs_on();
}

static inline void trace_hardirqs_fixup(void)
{
	unsigned long flags = __raw_local_save_flags();

	trace_hardirqs_fixup_flags(flags);
}
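
/*
 * A hypothetical caller sketch: a trap handler that cannot know whether
 * it interrupted an IRQs-on or an IRQs-off context might start with:
 *
 *	void do_some_trap(struct pt_regs *regs, long error_code)
 *	{
 *		trace_hardirqs_fixup();
 *		... handle the trap ...
 *	}
 */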
#endif /* __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code. We call a
 * C function, so save all the C-clobbered registers:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

# define TRACE_IRQS_ON				\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call trace_hardirqs_on;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

# define TRACE_IRQS_OFF				\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call trace_hardirqs_off;		\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
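
/*
 * Rough sketch of how the 32-bit entry assembly pairs these macros with
 * the actual flag changes (simplified; the real sequences live in the
 * kernel entry code, and CLBR_ANY is one of the paravirt clobber
 * constants used as the "clobbers" argument):
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)
 *	TRACE_IRQS_OFF			# tell lockdep after disabling
 *	...
 *	TRACE_IRQS_ON			# tell lockdep before re-enabling
 *	ENABLE_INTERRUPTS(CLBR_ANY)
 */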

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#else
# define LOCKDEP_SYS_EXIT
#endif

#endif /* _ASM_IRQFLAGS_H */