/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
struct exec_domain;
#include <asm/processor.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;		/* current CPU */
	int			preempt_count;	/* 0 => preemptable,
						   <0 => BUG */
	mm_segment_t		addr_limit;
	struct restart_block	restart_block;
	void __user		*sysenter_return;
#ifdef CONFIG_X86_32
	unsigned long		previous_esp;	/* ESP of the previous stack in
						   case of nested (IRQ) stacks
						 */
	__u8			supervisor_stack[0];
#endif
};
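
/*
 * Illustrative sketch (not part of the original header; exact THREAD_SIZE
 * is configuration dependent): thread_info lives at the bottom of the
 * task's kernel stack, which is THREAD_SIZE bytes and THREAD_SIZE aligned,
 * so it can always be found by masking the stack pointer:
 *
 *	high address	+-------------------+
 *			|    kernel stack   |  <- grows downward
 *	low address	|    thread_info    |  <- sp & ~(THREAD_SIZE - 1)
 *			+-------------------+
 */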

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= 1,			\
	.addr_limit	= KERNEL_DS,		\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 * Warning: layout of LSW is hardcoded in entry.S
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
#define TIF_IRET		5	/* force IRET */
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* 32bit process */
#define TIF_FORK		18	/* ret_from_fork */
#define TIF_ABI_PENDING		19	/* ABI switch pending at exec time */
#define TIF_MEMDIE		20	/* task is being killed by the OOM killer */
#define TIF_DEBUG		21	/* uses debug registers */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_FREEZE		23	/* is freezing for suspend */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS	27	/* record scheduling event timestamps */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_IRET		(1 << TIF_IRET)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_FORK		(1 << TIF_FORK)
#define _TIF_ABI_PENDING	(1 << TIF_ABI_PENDING)
#define _TIF_DEBUG		(1 << TIF_DEBUG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FREEZE		(1 << TIF_FREEZE)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)

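/*
 * Illustrative usage (not part of the original header): C code tests the
 * TIF_* bit numbers through the generic helpers from <linux/thread_info.h>,
 * while entry.S and the work masks below use the precomputed _TIF_* masks:
 *
 *	if (test_thread_flag(TIF_SIGPENDING))	-- bit number
 *		do_signal(regs);
 *	if (ti->flags & _TIF_NEED_RESCHED)	-- bit mask
 *		schedule();
 */
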
/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU |	\
	 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)

/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT	\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK						\
	(0x0000FFFF &						\
	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|		\
	   _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))

/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)

/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK	\
	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS|\
	 _TIF_NOTSC)

#define _TIF_WORK_CTXSW_PREV	_TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW|_TIF_DEBUG)

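/*
 * Illustrative note (not part of the original header): the 0x0000FFFF in
 * _TIF_WORK_MASK and _TIF_ALLWORK_MASK restricts them to the low 16 bits
 * because entry.S only tests the LSW for pending work (see the warning
 * above); bits 16 and up (TIF_NOTSC, TIF_IA32, ...) are state, not work.
 * A hypothetical compile-time check for that invariant could read:
 *
 *	#if (_TIF_WORK_MASK | _TIF_ALLWORK_MASK) & ~0x0000FFFF
 *	# error "return-path work masks must stay within the LSW"
 *	#endif
 */
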
#define PREEMPT_ACTIVE		0x10000000

/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
#else
#define THREAD_FLAGS GFP_KERNEL
#endif

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#define alloc_thread_info(tsk)						\
	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))

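/*
 * Illustrative usage (not part of the original header; THREAD_ORDER = 1,
 * i.e. an 8KB stack, is assumed): the allocator above hands out
 * 2^THREAD_ORDER pages per task, zeroed when CONFIG_DEBUG_STACK_USAGE is
 * set so that peak stack usage can be estimated later:
 *
 *	struct thread_info *ti = alloc_thread_info(tsk);
 *	if (!ti)
 *		return NULL;
 */
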
#ifdef CONFIG_X86_32

#define STACK_WARN	(THREAD_SIZE/8)
/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("esp") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)
		(current_stack_pointer & ~(THREAD_SIZE - 1));
}

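/*
 * Illustrative example (not part of the original header; THREAD_SIZE of
 * 8KB assumed): masking the stack pointer rounds it down to the base of
 * the current kernel stack, where thread_info lives:
 *
 *	%esp			0xc1235f40
 *	~(THREAD_SIZE - 1)	0xffffe000
 *	thread_info		0xc1234000
 */
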
#else /* !__ASSEMBLY__ */

/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg)	\
	movl $-THREAD_SIZE, reg; \
	andl %esp, reg

/* use this one if reg already contains %esp */
#define GET_THREAD_INFO_WITH_ESP(reg) \
	andl $-THREAD_SIZE, reg

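/*
 * Illustrative expansion (not part of the original header):
 *
 *	GET_THREAD_INFO(%ebp)
 * becomes
 *	movl $-THREAD_SIZE, %ebp
 *	andl %esp, %ebp
 *
 * i.e. the same ~(THREAD_SIZE - 1) mask as current_thread_info() above,
 * applied in registers ($-THREAD_SIZE is the two's complement of the size).
 */
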
#endif

#else /* X86_32 */

#include <asm/pda.h>

/*
 * macros/functions for gaining access to the thread information structure
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
	return ti;
}

/* do not use in interrupt context */
static inline struct thread_info *stack_thread_info(void)
{
	struct thread_info *ti;
	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
	return ti;
}

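/*
 * Illustrative note (not part of the original header): on 64-bit the
 * per-cpu PDA caches the top of the current kernel stack, so
 * current_thread_info() is effectively:
 *
 *	top = read_pda(kernelstack) + PDA_STACKOFFSET;		-- stack top
 *	ti  = (struct thread_info *)(top - THREAD_SIZE);	-- stack base
 *
 * stack_thread_info() instead masks %rsp, which is only valid while
 * running on the task's own stack (hence "do not use in interrupt
 * context", where a separate IRQ stack may be in use).
 */
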
#else /* !__ASSEMBLY__ */

/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
	movq %gs:pda_kernelstack,reg ; \
	subq $(THREAD_SIZE-PDA_STACKOFFSET),reg

#endif

#endif /* !X86_32 */

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU		0x0001	/* FPU was used by this task
					   this quantum (SMP) */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
#define TS_POLLING		0x0004	/* true if in idle loop
					   and not sleeping */
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
}
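
/*
 * Illustrative note (not part of the original header): TS_RESTORE_SIGMASK
 * lives in the thread-synchronous status word, so a plain OR is enough,
 * but signal delivery only runs when TIF_SIGPENDING is seen on the return
 * to user space, which is why set_bit() on the flags word is still needed.
 */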
#endif	/* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
extern void free_thread_info(struct thread_info *ti);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define arch_task_cache_init arch_task_cache_init
#endif
#endif /* _ASM_X86_THREAD_INFO_H */