#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
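
/*
 * Illustrative usage sketch (not a definitive reference): the scheduler's
 * context-switch path invokes this roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * The third argument is written in the context of the task being switched
 * to: when this task is eventually resumed, "last" holds the task we really
 * switched away from, which may no longer be the "prev" this stack frame
 * remembers.
 */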

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
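
/*
 * Illustrative usage sketch (not a definitive reference): the context-switch
 * code saves and restores the user-visible %gs roughly like this:
 *
 *	savesegment(gs, prev->thread.gs);
 *	loadsegment(gs, next->thread.gs);
 *
 * savesegment() records the current selector; loadsegment() installs the
 * new one, and the fixup section above quietly loads the null segment
 * instead of faulting if the saved selector has become invalid.
 */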

#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))

#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))

#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ ( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define read_cr4_safe() ({ \
	unsigned int __dummy; \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0	\n" \
		"2:			\n" \
		".section __ex_table,\"a\"	\n" \
		".long 1b,2b		\n" \
		".previous		\n" \
		: "=r" (__dummy): "0" (0)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))

/*
 * Clear and set the 'TS' (task switched) bit in CR0, respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define stts() write_cr0(8 | read_cr0())	/* 8 == CR0.TS (bit 3) */

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

/*
 * The __xg() cast makes the "m" operand refer to a deliberately oversized
 * object, so gcc treats the whole target as read and written by the asm
 * instead of caching parts of it in registers.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
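
/*
 * Illustrative usage sketch (not a definitive reference): set_64bit()
 * publishes a full 64-bit value atomically, so readers never observe a
 * torn, half-updated value:
 *
 *	static unsigned long long timestamp;
 *
 *	set_64bit(&timestamp, 0x0000000500000001ULL);
 *
 * The constant/variable split above only decides how the low and high
 * 32-bit halves are extracted before the common cmpxchg8b loop runs.
 */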

#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *	  strictly speaking the primitive is invalid anyway, since *ptr is
 *	  really an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
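
/*
 * Illustrative usage sketch (not a definitive reference): xchg() atomically
 * swaps in a new value and returns the old one, which is enough to build a
 * crude test-and-set flag:
 *
 *	static unsigned int busy;
 *
 *	if (tas(&busy) == 0) {
 *		... we won the flag, do the work ...
 *		busy = 0;	(release; real code would use a proper
 *				 spinlock or explicit barriers here)
 *	}
 *
 * tas(ptr) is simply xchg(ptr, 1), as defined above.
 */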

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif
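
/*
 * Illustrative usage sketch (not a definitive reference): the usual pattern
 * is a read-modify-write loop that retries until no other CPU has changed
 * the word underneath us, e.g. a lock-free increment:
 *
 *	static int counter;
 *
 *	int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg() returns the value that was in memory; it equals "old" only if
 * our new value was actually stored.
 */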

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary to
 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
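
/*
 * Illustrative usage sketch (not a definitive reference): the same retry
 * loop as cmpxchg(), but on a 64-bit quantity via cmpxchg8b, e.g. bumping
 * a 64-bit counter on a 32-bit CPU:
 *
 *	static unsigned long long events;
 *
 *	unsigned long long old, new;
 *	do {
 *		old = events;
 *		new = old + 1;
 *	} while (cmpxchg64(&events, old, new) != old);
 *
 * The plain read of "events" may be torn; the loop only terminates once
 * cmpxchg8b confirms that a consistent old value was replaced.
 */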

#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
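
/*
 * Illustrative usage sketch (not a definitive reference): the smp_*()
 * barriers pair up across CPUs, e.g. a classic flag/data handoff:
 *
 *	CPU 0				CPU 1
 *
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					use(data);
 *
 * On UP kernels these compile down to barrier(), a pure compiler barrier,
 * since a single CPU always observes its own program order.
 */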

#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif