#ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H

#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>

#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
#else
# define INIT_PUSHABLE_TASKS(tsk)
#endif

extern struct files_struct init_files;
extern struct fs_struct init_fs;

#ifdef CONFIG_CGROUPS
#define INIT_THREADGROUP_FORK_LOCK(sig) \
	.threadgroup_fork_lock = \
		__RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
#else
#define INIT_THREADGROUP_FORK_LOCK(sig)
#endif

#define INIT_SIGNALS(sig) { \
	.nr_threads	= 1, \
	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit), \
	.shared_pending	= { \
		.list = LIST_HEAD_INIT(sig.shared_pending.list), \
		.signal = {{0}}}, \
	.posix_timers	= LIST_HEAD_INIT(sig.posix_timers), \
	.cpu_timers	= INIT_CPU_TIMERS(sig.cpu_timers), \
	.rlim		= INIT_RLIMITS, \
	.cputimer	= { \
		.cputime = INIT_CPUTIME, \
		.running = 0, \
		.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
	}, \
	.cred_guard_mutex = \
		__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
	INIT_THREADGROUP_FORK_LOCK(sig) \
}

extern struct nsproxy init_nsproxy;

#define INIT_SIGHAND(sighand) { \
	.count		= ATOMIC_INIT(1), \
	.action		= { { { .sa_handler = SIG_DFL, } }, }, \
	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock), \
	.signalfd_wqh	= __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}
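/*
 * Usage sketch (illustrative, not part of this header): INIT_SIGNALS() and
 * INIT_SIGHAND() above are conventionally expanded exactly once, in the
 * per-architecture init_task.c, to build the boot task's static signal
 * state, e.g.:
 *
 *	static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 *	static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 */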

extern struct group_info init_groups;

#define INIT_STRUCT_PID { \
	.count		= ATOMIC_INIT(1), \
	.tasks		= { \
		{ .first = NULL }, \
		{ .first = NULL }, \
		{ .first = NULL }, \
	}, \
	.level		= 0, \
	.numbers	= { { \
		.nr		= 0, \
		.ns		= &init_pid_ns, \
		.pid_chain	= { .next = NULL, .pprev = NULL }, \
	}, } \
}

#define INIT_PID_LINK(type) \
{ \
	.node = { \
		.next = NULL, \
		.pprev = NULL, \
	}, \
	.pid = &init_struct_pid, \
}
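/*
 * Usage sketch (illustrative, not part of this header): INIT_STRUCT_PID above
 * is assumed to initialize the statically allocated pid of PID 0
 * (historically expanded in kernel/pid.c), while INIT_PID_LINK() fills the
 * .pids[] array inside INIT_TASK() below, e.g.:
 *
 *	struct pid init_struct_pid = INIT_STRUCT_PID;
 */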

#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
	.loginuid = -1, \
	.sessionid = -1,
#else
#define INIT_IDS
#endif

#ifdef CONFIG_RCU_BOOST
#define INIT_TASK_RCU_BOOST() \
	.rcu_boost_mutex = NULL,
#else
#define INIT_TASK_RCU_BOOST()
#endif
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_TREE_PREEMPT() \
	.rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_TREE_PREEMPT()
#endif
#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
	.rcu_read_lock_nesting = 0, \
	.rcu_read_unlock_special = 0, \
	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
	INIT_TASK_RCU_TREE_PREEMPT() \
	INIT_TASK_RCU_BOOST()
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif

extern struct cred init_cred;

#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
	.perf_event_mutex = \
		__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif

/*
 * INIT_TASK is used to set up the first task table; touch at
 * your own risk! Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK(tsk) \
{ \
	.state		= 0, \
	.stack		= &init_thread_info, \
	.usage		= ATOMIC_INIT(2), \
	.flags		= PF_KTHREAD, \
	.prio		= MAX_PRIO-20, \
	.static_prio	= MAX_PRIO-20, \
	.normal_prio	= MAX_PRIO-20, \
	.policy		= SCHED_NORMAL, \
	.cpus_allowed	= CPU_MASK_ALL, \
	.mm		= NULL, \
	.active_mm	= &init_mm, \
	.se		= { \
		.group_node	= LIST_HEAD_INIT(tsk.se.group_node), \
	}, \
	.rt		= { \
		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list), \
		.time_slice	= HZ, \
		.nr_cpus_allowed = NR_CPUS, \
	}, \
	.tasks		= LIST_HEAD_INIT(tsk.tasks), \
	INIT_PUSHABLE_TASKS(tsk) \
	.ptraced	= LIST_HEAD_INIT(tsk.ptraced), \
	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry), \
	.real_parent	= &tsk, \
	.parent		= &tsk, \
	.children	= LIST_HEAD_INIT(tsk.children), \
	.sibling	= LIST_HEAD_INIT(tsk.sibling), \
	.group_leader	= &tsk, \
	RCU_INIT_POINTER(.real_cred, &init_cred), \
	RCU_INIT_POINTER(.cred, &init_cred), \
	.comm		= "swapper", \
	.thread		= INIT_THREAD, \
	.fs		= &init_fs, \
	.files		= &init_files, \
	.signal		= &init_signals, \
	.sighand	= &init_sighand, \
	.nsproxy	= &init_nsproxy, \
	.pending	= { \
		.list = LIST_HEAD_INIT(tsk.pending.list), \
		.signal = {{0}}}, \
	.blocked	= {{0}}, \
	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
	.journal_info	= NULL, \
	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers), \
	.fs_excl	= ATOMIC_INIT(0), \
	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
	.timer_slack_ns = 50000, /* 50 usec default slack */ \
	.pids = { \
		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID), \
		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID), \
	}, \
	.thread_group	= LIST_HEAD_INIT(tsk.thread_group), \
	.dirties	= INIT_PROP_LOCAL_SINGLE(dirties), \
	INIT_IDS \
	INIT_PERF_EVENTS(tsk) \
	INIT_TRACE_IRQFLAGS \
	INIT_LOCKDEP \
	INIT_FTRACE_GRAPH \
	INIT_TRACE_RECURSION \
	INIT_TASK_RCU_PREEMPT(tsk) \
}


#define INIT_CPU_TIMERS(cpu_timers) \
{ \
	LIST_HEAD_INIT(cpu_timers[0]), \
	LIST_HEAD_INIT(cpu_timers[1]), \
	LIST_HEAD_INIT(cpu_timers[2]), \
}

/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data..init_task")))

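/*
 * Usage sketch (illustrative, not part of this header): an architecture is
 * expected to expand INIT_TASK() exactly once to create the boot task and to
 * place its backing thread union in the .data..init_task section via
 * __init_task_data, roughly as in the x86 init_task.c of this era:
 *
 *	union thread_union init_thread_union __init_task_data =
 *		{ INIT_THREAD_INFO(init_task) };
 *
 *	struct task_struct init_task = INIT_TASK(init_task);
 *	EXPORT_SYMBOL(init_task);
 */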

#endif