#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important. Not only does it switch IOPL between
 * tasks, it also protects other tasks from the NT flag leaking through
 * sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
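
/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * scheduler's use of the three-argument form. 'last' is an output: after
 * the switch we are back on *our* stack, possibly much later, and the
 * task that ran immediately before us may not be the 'prev' we remembered,
 * so switch_to() writes the real one back. pick_next_task_somehow() is a
 * hypothetical stand-in for the scheduler's selection logic.
 */
#if 0
	struct task_struct *prev = current;
	struct task_struct *next = pick_next_task_somehow();

	switch_to(prev, next, prev);
	/* here 'prev' names the task we actually switched away from */
#endif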

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))

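/*
 * Illustrative sketch (editor's addition, not part of this header):
 * saving a segment register and reloading it later, as context-switch
 * code does. The function name and the choice of %gs are assumptions
 * for illustration; loadsegment() falls back to the null selector if
 * the new value is invalid.
 */
#if 0
static inline void example_swap_gs(unsigned int newseg)
{
	unsigned int oldseg;

	savesegment(gs, oldseg);	/* stash the current %gs selector */
	loadsegment(gs, newseg);	/* a faulting selector is fixed up
					   by loading the zero segment */
	/* ... run with the new segment ... */
	loadsegment(gs, oldseg);	/* put the old selector back */
}
#endif
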
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))

#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))

#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ ( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define read_cr4_safe() ({ \
	unsigned int __dummy; \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0		\n" \
		"2:				\n" \
		".section __ex_table,\"a\"	\n" \
		".long 1b,2b			\n" \
		".previous			\n" \
		: "=r" (__dummy): "0" (0)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
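
/*
 * Illustrative sketch (editor's addition): a read-modify-write of %cr4,
 * e.g. turning on a feature bit. The 0x10 (PSE) value is shown inline as
 * an assumption; real code would use the X86_CR4_* constants, and must
 * not write %cr4 on a CPU that lacks it (read_cr4_safe() yields 0 there).
 */
#if 0
	unsigned int cr4 = read_cr4_safe();	/* 0 if the CPU has no %cr4 */
	if (cr4)
		write_cr4(cr4 | 0x10);		/* enable 4MB pages (PSE) */
#endif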

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/* Clear the 'TS' bit */
#define clts() __asm__ __volatile__ ("clts")
#endif	/* CONFIG_PARAVIRT */

/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
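
/*
 * Illustrative sketch (editor's addition): the TS dance used for lazy FPU
 * handling. With TS set, the next FPU instruction raises the
 * device-not-available trap; the handler runs clts() before restoring the
 * task's FPU state. A simplified outline, not actual kernel code:
 */
#if 0
	clts();		/* let FPU instructions run without trapping */
	/* ... restore or use the task's FPU state ... */
	stts();		/* arm the trap again for the next FPU user */
#endif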

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
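
/*
 * Illustrative sketch (editor's addition): combining savesegment() and
 * get_limit() to compute the byte size of the segment currently in %ds.
 * Variable names are examples only.
 */
#if 0
	unsigned long sel, size;

	savesegment(ds, sel);		/* fetch the %ds selector */
	size = get_limit(sel);		/* lsll limit + 1 = size in bytes */
#endif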

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr),
		  "b"(low),
		  "c"(high)
		: "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

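/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): publishing a 64-bit value with set_64bit() so that concurrent
 * readers never observe a torn pair of half-written 32-bit words. The
 * variable name is an assumption for illustration only.
 */
#if 0
	static unsigned long long example_stamp;

	set_64bit(&example_stamp, 0x0000000100000000ULL);
	/* a reader using a lock-prefixed cmpxchg8b based load sees either
	   the old 64-bit value or the new one, never a mix of the two */
#endif
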
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 * strictly speaking the prototype is not quite right, since *ptr is
 * really an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}
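
/*
 * Illustrative sketch (editor's addition): the classic test-and-set lock
 * built on tas()/xchg(), showing why xchg needs no lock prefix (it is
 * implicitly locked). Not a kernel API; real code uses spinlock_t.
 * cpu_relax() is assumed from <asm/processor.h>.
 */
#if 0
static inline void example_lock(volatile int *lock)
{
	while (tas(lock))	/* atomically store 1, get the old value */
		while (*lock)	/* spin read-only until it looks free */
			cpu_relax();
}

static inline void example_unlock(volatile int *lock)
{
	*lock = 0;		/* x86 stores are not reordered against
				   each other, so a plain store releases */
}
#endif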

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#define sync_cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif
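
/*
 * Illustrative sketch (editor's addition): the canonical cmpxchg retry
 * loop, here as an atomic add on an unsigned long. Not a kernel API;
 * atomic_t should be preferred in real code.
 */
#if 0
static inline unsigned long example_atomic_add(volatile unsigned long *p,
					       unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *p;		/* sample the current value */
		new = old + delta;
	} while (cmpxchg(p, old, new) != old);	/* lost a race: retry */
	return new;
}
#endif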

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				  (unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				    (unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#endif

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
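
/*
 * Illustrative sketch (editor's addition): a 64-bit atomic increment on a
 * 32-bit CPU via the cmpxchg64() retry loop. The function name is an
 * assumption for illustration.
 */
#if 0
static inline unsigned long long example_inc64(volatile unsigned long long *p)
{
	unsigned long long old;

	do {
		old = *p;	/* this plain 64-bit read may be torn; the
				   cmpxchg64 below rejects a stale sample
				   and forces a retry */
	} while (cmpxchg64(p, old, old + 1) != old);
	return old + 1;
}
#endif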

#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb()
 * ceases to be a nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
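
/*
 * Illustrative sketch (editor's addition): the store/load pairing the
 * smp_*mb() macros above exist for. The flag and data names are examples
 * only; cpu_relax() is assumed from <asm/processor.h>.
 */
#if 0
static int example_data, example_ready;

static void example_publish(int v)
{
	example_data = v;
	smp_wmb();		/* data store visible before the flag store */
	example_ready = 1;
}

static int example_consume(void)
{
	while (!example_ready)
		cpu_relax();
	smp_rmb();		/* flag load ordered before the data load */
	return example_data;
}
#endif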
501
Ingo Molnar55f327f2006-07-03 00:24:43 -0700502#include <linux/irqflags.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
504/*
505 * disable hlt during certain critical i/o operations
506 */
507#define HAVE_DISABLE_HLT
508void disable_hlt(void);
509void enable_hlt(void);
510
511extern int es7000_plat;
512void cpu_idle_wait(void);
513
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif