#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/alternative.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags across the switch so that a leaking NT flag is cleared */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					    \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
		     "call __switch_to\n\t"				    \
		     ".globl thread_return\n"				    \
		     "thread_return:\n\t"				    \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		    \
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		    \
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"   \
		     "movq %%rax,%%rdi\n\t"				    \
		     "jc ret_from_fork\n\t"				    \
		     RESTORE_CONTEXT					    \
		     : "=a" (last)					    \
		     : [next] "S" (next), [prev] "D" (prev),		    \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			    \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))

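/*
 * Illustrative sketch (not part of the original header): loadsegment() is
 * how a possibly stale user selector gets reloaded.  'example_restore_fs'
 * is a hypothetical helper; if the selector is invalid, the .fixup path
 * above loads the zero segment instead of faulting.
 */
static inline void example_restore_fs(unsigned short sel)
{
	loadsegment(fs, sel);	/* falls back to the zero segment on a fault */
}
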
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() write_cr0(8 | read_cr0())

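/*
 * Illustrative sketch (not part of the original header): clts() and stts()
 * are the lazy-FPU primitives.  stts() sets CR0.TS (bit 3, hence the 8
 * above) so the next FPU instruction traps with #NM; the trap handler
 * clears it again with clts() before handing the FPU to the task.
 * The 'example_*' helpers are hypothetical.
 */
static inline void example_fpu_switch_away(void)
{
	stts();			/* arm the #NM trap for lazy FPU restore */
}

static inline void example_fpu_trap_handler(void)
{
	clts();			/* let the task use the FPU without trapping */
}
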
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 * strictly speaking the constraints are incomplete, since *ptr is also
 * an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}

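/*
 * Illustrative sketch (not part of the original header): xchg() is the
 * classic test-and-set building block, which is why tas() above is just
 * xchg(ptr, 1).  'example_trylock' is a hypothetical helper: it returns
 * the previous value, so zero means the lock was acquired.
 */
static inline unsigned long example_trylock(volatile unsigned long *lock)
{
	return xchg(lock, 1UL);	/* atomically set to 1, return the old value */
}
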
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev)
			: "q"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev)
			: "r"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev)
			: "r"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev)
			: "r"(new), "m"(*__xg(ptr)), "0"(old)
			: "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
				       (unsigned long)(n),sizeof(*(ptr))))

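/*
 * Illustrative sketch (not part of the original header): the canonical
 * cmpxchg() retry loop.  Success is detected by comparing the returned
 * value with the 'old' snapshot, exactly as the comment above describes.
 * 'example_atomic_add' is a hypothetical helper.
 */
static inline void example_atomic_add(volatile unsigned long *p, unsigned long v)
{
	unsigned long old;

	do {
		old = *p;	/* snapshot the current value */
	} while (cmpxchg(p, old, old + v) != old);	/* retry if it changed */
}
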
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)

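/*
 * Illustrative sketch (not part of the original header): the classic
 * publish/consume pairing of smp_wmb() and smp_rmb().  The writer orders
 * its data store before the flag store; the reader orders the flag load
 * before the data load.  The 'example_*' helpers are hypothetical.
 */
static inline void example_publish(int *data, volatile int *flag)
{
	*data = 42;
	smp_wmb();		/* data must be visible before the flag */
	*flag = 1;
}

static inline int example_consume(int *data, volatile int *flag)
{
	if (*flag) {
		smp_rmb();	/* flag must be read before the data */
		return *data;
	}
	return -1;
}
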
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif