Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX__INIT_TASK_H |
| 2 | #define _LINUX__INIT_TASK_H |
| 3 | |
Dipankar Sarma | ab2af1f | 2005-09-09 13:04:13 -0700 | [diff] [blame] | 4 | #include <linux/rcupdate.h> |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 5 | #include <linux/irqflags.h> |
Serge E. Hallyn | 4865ecf | 2006-10-02 02:18:14 -0700 | [diff] [blame] | 6 | #include <linux/utsname.h> |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 7 | #include <linux/lockdep.h> |
Steven Rostedt | 5ac9f62 | 2009-03-25 20:55:00 -0400 | [diff] [blame] | 8 | #include <linux/ftrace.h> |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 9 | #include <linux/ipc.h> |
Cedric Le Goater | 9a575a9 | 2006-12-08 02:37:59 -0800 | [diff] [blame] | 10 | #include <linux/pid_namespace.h> |
Cedric Le Goater | acce292 | 2007-07-15 23:40:59 -0700 | [diff] [blame] | 11 | #include <linux/user_namespace.h> |
Andrew G. Morgan | 3898b1b | 2008-04-28 02:13:40 -0700 | [diff] [blame] | 12 | #include <linux/securebits.h> |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 13 | #include <linux/seqlock.h> |
Peter Zijlstra | fb00aca | 2013-11-07 14:43:43 +0100 | [diff] [blame] | 14 | #include <linux/rbtree.h> |
Eric W. Biederman | 772698f | 2007-09-12 11:55:17 +0200 | [diff] [blame] | 15 | #include <net/net_namespace.h> |
Ingo Molnar | 77852fe | 2013-02-16 09:46:48 +0100 | [diff] [blame] | 16 | #include <linux/sched/rt.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | |
/*
 * Initializer for task_struct::pushable_tasks: an unqueued plist node
 * at priority MAX_PRIO.  The field only exists on SMP kernels
 * (presumably for RT push/pull migration — the struct definition lives
 * elsewhere), so the UP variant expands to nothing.
 */
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk)					\
	.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
#else
# define INIT_PUSHABLE_TASKS(tsk)
#endif
| 24 | |
Al Viro | f52111b | 2008-05-08 18:19:16 -0400 | [diff] [blame] | 25 | extern struct files_struct init_files; |
Al Viro | 18d8fda | 2008-12-26 00:35:37 -0500 | [diff] [blame] | 26 | extern struct fs_struct init_fs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 27 | |
/*
 * Initializer for signal_struct::group_rwsem (an unlocked rwsem).
 * The member exists only when CONFIG_CGROUPS is set; otherwise expand
 * to nothing so INIT_SIGNALS stays valid.
 */
#ifdef CONFIG_CGROUPS
#define INIT_GROUP_RWSEM(sig)						\
	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
#else
#define INIT_GROUP_RWSEM(sig)
#endif
| 34 | |
/*
 * Initializer for task_struct::mems_allowed_seq, a zeroed seqcount.
 * Only present under CONFIG_CPUSETS (by its name it guards reads of
 * the task's cpuset-allowed memory nodes — confirm in sched.h).
 */
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk)						\
	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
#else
#define INIT_CPUSET_SEQ(tsk)
#endif
| 41 | |
/*
 * Static initializer for the boot task's signal_struct (init_signals):
 * one thread, empty shared-pending queue, no posix timers, default
 * rlimits, zeroed group cputimer, unlocked cred_guard_mutex.
 *
 * NOTE: .thread_head deliberately references init_task.thread_node
 * (not sig) — it heads the thread list and must point at the init
 * task's list node; INIT_TASK's .thread_node initializer mirrors this.
 */
#define INIT_SIGNALS(sig) {						\
	.nr_threads	= 1,						\
	.thread_head	= LIST_HEAD_INIT(init_task.thread_node),	\
	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
	.shared_pending	= {						\
		.list = LIST_HEAD_INIT(sig.shared_pending.list),	\
		.signal = {{0}}},					\
	.posix_timers	= LIST_HEAD_INIT(sig.posix_timers),		\
	.cpu_timers	= INIT_CPU_TIMERS(sig.cpu_timers),		\
	.rlim		= INIT_RLIMITS,					\
	.cputimer	= {						\
		.cputime = INIT_CPUTIME,				\
		.running = 0,						\
		.lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
	},								\
	.cred_guard_mutex =						\
		__MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
	INIT_GROUP_RWSEM(sig)						\
}
| 61 | |
Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 62 | extern struct nsproxy init_nsproxy; |
Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 63 | |
/*
 * Static initializer for the boot task's sighand_struct: refcount 1,
 * every signal action set to SIG_DFL, unlocked siglock, and an empty
 * signalfd wait-queue head.
 */
#define INIT_SIGHAND(sighand) {						\
	.count		= ATOMIC_INIT(1),				\
	.action		= { { { .sa_handler = SIG_DFL, } }, },		\
	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock),	\
	.signalfd_wqh	= __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh),	\
}
| 70 | |
| 71 | extern struct group_info init_groups; |
| 72 | |
/*
 * Static initializer for init_struct_pid: refcount 1, three empty
 * task hlist heads (one per pid type — PID, PGID, SID, matching the
 * PIDTYPE_* usage in INIT_TASK below), level 0 (root pid namespace),
 * and a single upid number {0, &init_pid_ns} with an unhashed chain.
 */
#define INIT_STRUCT_PID {						\
	.count		= ATOMIC_INIT(1),				\
	.tasks		= {						\
		{ .first = NULL },					\
		{ .first = NULL },					\
		{ .first = NULL },					\
	},								\
	.level		= 0,						\
	.numbers	= { {						\
		.nr		= 0,					\
		.ns		= &init_pid_ns,				\
		.pid_chain	= { .next = NULL, .pprev = NULL },	\
	}, }								\
}
| 87 | |
/*
 * Initializer for one pid_link entry in task_struct::pids[]: an
 * unhashed hlist node pointing at init_struct_pid.  The "type"
 * argument is unused by the expansion; it only documents which
 * PIDTYPE_* slot is being filled at the call site.
 */
#define INIT_PID_LINK(type)					\
{								\
	.node = {						\
		.next = NULL,					\
		.pprev = NULL,					\
	},							\
	.pid = &init_struct_pid,				\
}
| 96 | |
/*
 * Audit identity fields: start with an invalid login uid and an
 * invalid (-1) session id so the boot task has no audit identity
 * until one is explicitly set.  Empty without CONFIG_AUDITSYSCALL.
 */
#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
	.loginuid = INVALID_UID, \
	.sessionid = (unsigned int)-1,
#else
#define INIT_IDS
#endif
Serge E. Hallyn | 3b7391d | 2008-02-04 22:29:45 -0800 | [diff] [blame] | 104 | |
/*
 * Tree-preemptible RCU records which rcu_node a blocked reader is
 * queued on; the boot task starts with none.  Expands to nothing for
 * other RCU flavors.
 *
 * Fix: the two branches previously declared inconsistent signatures —
 * the CONFIG_TREE_PREEMPT_RCU definition took no parameter while the
 * #else stub declared an unused "tsk".  The only call site (in
 * INIT_TASK_RCU_PREEMPT below) invokes it as
 * INIT_TASK_RCU_TREE_PREEMPT(), so both branches now uniformly take
 * no argument.
 */
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_TREE_PREEMPT()					\
	.rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_TREE_PREEMPT()
#endif
/*
 * Preemptible-RCU per-task state for the boot task: not inside any
 * read-side critical section (nesting 0), no deferred unlock work
 * (.rcu_read_unlock_special.s = 0), empty blocked-tasks list entry,
 * plus the tree-RCU extras from INIT_TASK_RCU_TREE_PREEMPT().
 */
#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk)					\
	.rcu_read_lock_nesting = 0,					\
	.rcu_read_unlock_special.s = 0,					\
	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),		\
	INIT_TASK_RCU_TREE_PREEMPT()
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
/*
 * RCU-tasks per-task state for the boot task: not currently a
 * holdout, empty holdout-list entry, and rcu_tasks_idle_cpu = -1
 * (by its name, "not running on an idle CPU" — confirm in sched.h).
 */
#ifdef CONFIG_TASKS_RCU
#define INIT_TASK_RCU_TASKS(tsk)					\
	.rcu_tasks_holdout = false,					\
	.rcu_tasks_holdout_list =					\
		LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list),		\
	.rcu_tasks_idle_cpu = -1,
#else
#define INIT_TASK_RCU_TASKS(tsk)
#endif
Paul E. McKenney | f41d911 | 2009-08-22 13:56:52 -0700 | [diff] [blame] | 129 | |
David Howells | b6dff3e | 2008-11-14 10:39:16 +1100 | [diff] [blame] | 130 | extern struct cred init_cred; |
| 131 | |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 132 | extern struct task_group root_task_group; |
| 133 | |
/*
 * With cgroup scheduling, the boot task belongs to the root task
 * group; otherwise the field does not exist and this expands empty.
 */
#ifdef CONFIG_CGROUP_SCHED
# define INIT_CGROUP_SCHED(tsk)						\
	.sched_task_group = &root_task_group,
#else
# define INIT_CGROUP_SCHED(tsk)
#endif
| 140 | |
/*
 * perf per-task state: an unlocked perf_event_mutex and an empty
 * perf_event_list.  Empty without CONFIG_PERF_EVENTS.
 */
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk)						\
	.perf_event_mutex =						\
		__MUTEX_INITIALIZER(tsk.perf_event_mutex),		\
	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif
| 149 | |
/*
 * Generic virtual CPU time accounting state: unlocked vtime seqlock,
 * snapshot cleared, and accounting starting in system mode
 * (VTIME_SYS).  Empty without CONFIG_VIRT_CPU_ACCOUNTING_GEN.
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk)						\
	.vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock),	\
	.vtime_snap = 0,						\
	.vtime_snap_whence = VTIME_SYS,
#else
# define INIT_VTIME(tsk)
#endif
| 158 | |
Carsten Emde | f1c6f1a | 2011-10-26 23:14:16 +0200 | [diff] [blame] | 159 | #define INIT_TASK_COMM "swapper" |
| 160 | |
/*
 * Priority-inheritance state for rt-mutexes: an empty rbtree of
 * waiters boosting this task and no cached leftmost node.  Empty
 * without CONFIG_RT_MUTEXES.
 */
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk)						\
	.pi_waiters = RB_ROOT,						\
	.pi_waiters_leftmost = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
#endif
| 168 | |
/*
 * INIT_TASK is used to set up the first task table, touch at
 * your own risk!. Base=0, limit=0x1fffff (=2MB)
 *
 * Reviewer notes on the less obvious initializers:
 *  - .usage = ATOMIC_INIT(2): the boot task starts with two
 *    references (presumably the task itself plus its role as the
 *    idle task — confirm against fork.c/sched init).
 *  - .real_parent/.parent/.group_leader all point at the task itself:
 *    PID 0 has no ancestor.
 *  - .thread_node links into init_signals.thread_head, pairing with
 *    the .thread_head initializer in INIT_SIGNALS above.
 *  - The trailing INIT_* macros expand to further designated
 *    initializers (or nothing) depending on kernel configuration.
 */
#define INIT_TASK(tsk)	\
{									\
	.state		= 0,	/* runnable */				\
	.stack		= &init_thread_info,				\
	.usage		= ATOMIC_INIT(2),				\
	.flags		= PF_KTHREAD,	/* PID 0 is a kernel thread */	\
	.prio		= MAX_PRIO-20,					\
	.static_prio	= MAX_PRIO-20,					\
	.normal_prio	= MAX_PRIO-20,					\
	.policy		= SCHED_NORMAL,					\
	.cpus_allowed	= CPU_MASK_ALL,					\
	.nr_cpus_allowed= NR_CPUS,					\
	.mm		= NULL,		/* kernel thread: no user mm */	\
	.active_mm	= &init_mm,					\
	.se		= {						\
		.group_node 	= LIST_HEAD_INIT(tsk.se.group_node),	\
	},								\
	.rt		= {						\
		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
		.time_slice	= RR_TIMESLICE,				\
	},								\
	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
	INIT_PUSHABLE_TASKS(tsk)					\
	INIT_CGROUP_SCHED(tsk)						\
	.ptraced	= LIST_HEAD_INIT(tsk.ptraced),			\
	.ptrace_entry	= LIST_HEAD_INIT(tsk.ptrace_entry),		\
	.real_parent	= &tsk,						\
	.parent		= &tsk,						\
	.children	= LIST_HEAD_INIT(tsk.children),			\
	.sibling	= LIST_HEAD_INIT(tsk.sibling),			\
	.group_leader	= &tsk,						\
	RCU_POINTER_INITIALIZER(real_cred, &init_cred),			\
	RCU_POINTER_INITIALIZER(cred, &init_cred),			\
	.comm		= INIT_TASK_COMM,				\
	.thread		= INIT_THREAD,					\
	.fs		= &init_fs,					\
	.files		= &init_files,					\
	.signal		= &init_signals,				\
	.sighand	= &init_sighand,				\
	.nsproxy	= &init_nsproxy,				\
	.pending	= {						\
		.list = LIST_HEAD_INIT(tsk.pending.list),		\
		.signal = {{0}}},					\
	.blocked	= {{0}},					\
	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),		\
	.journal_info	= NULL,						\
	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
	.timer_slack_ns = 50000, /* 50 usec default slack */		\
	.pids = {							\
		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
	},								\
	.thread_group	= LIST_HEAD_INIT(tsk.thread_group),		\
	.thread_node	= LIST_HEAD_INIT(init_signals.thread_head),	\
	INIT_IDS							\
	INIT_PERF_EVENTS(tsk)						\
	INIT_TRACE_IRQFLAGS						\
	INIT_LOCKDEP							\
	INIT_FTRACE_GRAPH						\
	INIT_TRACE_RECURSION						\
	INIT_TASK_RCU_PREEMPT(tsk)					\
	INIT_TASK_RCU_TASKS(tsk)					\
	INIT_CPUSET_SEQ(tsk)						\
	INIT_RT_MUTEXES(tsk)						\
	INIT_VTIME(tsk)							\
}
| 241 | |
| 242 | |
/*
 * Initializer for a cpu_timers[3] array: three empty timer lists
 * (by convention one per CPU clock — prof/virt/sched; confirm in
 * posix-cpu-timers code).
 */
#define INIT_CPU_TIMERS(cpu_timers)					\
{									\
	LIST_HEAD_INIT(cpu_timers[0]),					\
	LIST_HEAD_INIT(cpu_timers[1]),					\
	LIST_HEAD_INIT(cpu_timers[2]),					\
}
| 249 | |
/*
 * Attach to the init_task data structure for proper alignment:
 * places the annotated object in the dedicated ".data..init_task"
 * section (the linker script sizes/aligns that section elsewhere).
 */
#define __init_task_data __attribute__((__section__(".data..init_task")))
Tim Abbott | 857ecee | 2009-06-23 19:59:36 -0400 | [diff] [blame] | 252 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 253 | |
| 254 | #endif |