#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* The frame pointer must be saved last for get_wchan() to work. */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore the flags across the switch to avoid leaking the NT flag. */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					    \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
		     "call __switch_to\n\t"				    \
		     ".globl thread_return\n"				    \
		     "thread_return:\n\t"				    \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		    \
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		    \
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"   \
		     "movq %%rax,%%rdi\n\t"				    \
		     "jc ret_from_fork\n\t"				    \
		     RESTORE_CONTEXT					    \
		     : "=a" (last)					    \
		     : [next] "S" (next), [prev] "D" (prev),		    \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			    \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)
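
/*
 * Usage sketch (illustrative only -- the real call site is the scheduler's
 * context_switch() path, and all locking is omitted here): switch_to()
 * resumes in the context of "next", and only returns in "prev" once "prev"
 * is scheduled back in; "last" then names the task that was running just
 * before us, so the caller can finish bookkeeping for it:
 *
 *	struct task_struct *prev, *next, *last;
 *	...
 *	switch_to(prev, next, last);
 *	finish_bookkeeping(last);	-- hypothetical helper
 */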

extern void load_gs_index(unsigned);

/*
 * Load a segment register. Fall back on loading the zero (null)
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"	\
		"1:\t"	\
		"movl %k0,%%" #seg "\n" \
		"2:\n"	\
		".section .fixup,\"ax\"\n" \
		"3:\t"	\
		"movl %1,%%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 8\n\t" \
		".quad 1b,3b\n" \
		".previous" \
		: :"r" (value), "r" (0))
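
/*
 * How the fixup works (a sketch): if the "movl" at label 1 faults on a
 * bad selector, the __ex_table entry sends the fault handler to label 3,
 * which loads the null selector instead and resumes at label 2.  Typical
 * use, as in the context-switch code:
 *
 *	loadsegment(fs, next->fsindex);
 */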

/*
 * Clear and set the CR0 'TS' (task-switched) bit, respectively.
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline void write_cr3(unsigned long val)
{
	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}
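
/*
 * Example (a minimal sketch; the real helpers live in <asm/tlbflush.h>):
 * reloading CR3 with its current value flushes all non-global TLB
 * entries, which is the classic way to invalidate the TLB on x86.
 *
 *	static inline void example_flush_tlb(void)
 *	{
 *		write_cr3(read_cr3());
 *	}
 */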

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

/* Set CR0.TS (bit 3) to force a trap on the next FPU instruction. */
#define stts() write_cr0(8 | read_cr0())
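
/*
 * Usage sketch (illustrative of lazy FPU switching; the real logic lives
 * in the i387 code, and "restore_fpu_state" is a hypothetical name):
 *
 *	clts();			-- allow FPU use without trapping
 *	restore_fpu_state(current);
 *	...
 *	stts();			-- re-arm the device-not-available trap
 */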

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while (0)
#endif


/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while (0)
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
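
/*
 * Pairing sketch (hypothetical flag/data pair, shown only to illustrate
 * how the barriers are used together): the producer publishes the data
 * before setting the flag, the consumer checks the flag before reading:
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 */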

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif