#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>
#include <linux/nodemask.h>

#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types_task.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>

#include <asm/current.h>

/* task_struct member predeclarations: */
struct audit_context;
struct autogroup;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct filename;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct uts_namespace;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR	"RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
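
/*
 * Illustrative sketch, not part of this header: the convenience states above
 * are used the same way as the plain task states. A wait that should only be
 * interrupted by fatal signals might look roughly like
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (condition || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * where "condition" stands in for the caller's wakeup condition and the
 * waker side uses wake_up_process() or a wait-queue wakeup.
 */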

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (!need_sleep)
 *			break;
 *
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *	need_sleep = false;
 *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif


/* Task command name length */
#define TASK_COMM_LEN 16

extern cpumask_var_t cpu_isolated_map;

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

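/*
 * Illustrative sketch of the timeout helpers declared above (not a new API):
 * sleeping for roughly 100ms without open-coding the set_current_state() /
 * schedule_timeout() pair could be done with
 *
 *	schedule_timeout_interruptible(msecs_to_jiffies(100));
 *
 * The _interruptible/_killable/_uninterruptible/_idle variants set the task
 * state themselves before calling schedule_timeout(); msecs_to_jiffies() is
 * assumed from <linux/jiffies.h>.
 */
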
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* cumulative counters */
	unsigned long pcount;		/* # of times run on this cpu */
	unsigned long long run_delay;	/* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)

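/*
 * Purely illustrative numbers that follow from the definitions above (no new
 * definitions are introduced here): with SCHED_FIXEDPOINT_SHIFT == 10, the
 * fixed point "1.0" is SCHED_FIXEDPOINT_SCALE == 1024, so 0.5 is stored as
 * 512 and 1.5 as 1536; multiplying two such values requires shifting the
 * product right by SCHED_FIXEDPOINT_SHIFT to renormalize it.
 */
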
struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *	load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *	load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *	util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *	util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *	Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
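
/*
 * Worked example of the definitions above, for illustration only: a nice-0
 * task has scale_load_down(load) == 1024 with SCHED_FIXEDPOINT_SHIFT == 10
 * (scale_load_down() lives in the scheduler internals, not in this header),
 * so if it is runnable about half of the time its load_avg converges towards
 * roughly 512; if it actually runs 25% of the time on a full-capacity CPU,
 * its util_avg converges towards roughly SCHED_CAPACITY_SCALE / 4 == 256.
 */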

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};


struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period)	*/
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside the bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
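
/*
 * Illustrative only: the userspace side of these parameters is a struct
 * sched_attr (forward declared earlier in this file, defined elsewhere)
 * passed to the sched_setattr() system call. A task needing 10ms of CPU time
 * every 100ms, with a relative deadline of 50ms, would be admitted with
 * values along the lines of
 *
 *	attr.sched_policy   = SCHED_DEADLINE;
 *	attr.sched_runtime  = 10 * 1000 * 1000;		(10ms, in nanoseconds)
 *	attr.sched_deadline = 50 * 1000 * 1000;		(50ms)
 *	attr.sched_period   = 100 * 1000 * 1000;	(100ms)
 *
 * after which dl_runtime, dl_deadline and dl_period above hold those values
 * (runtime <= deadline <= period must hold for admission control).
 */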

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info sched_info;

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif
	/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1;	/* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;	/* for vfork() */
	int __user *set_child_tid;	/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */

	u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw;	/* context switch counts */
	u64 start_time;			/* monotonic time in nsec */
	u64 real_start_time;		/* boot based time in nsec */
	/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

	/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					     * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which locks
				       it with task_lock())
				     - initialized normally by setup_new_exec */
	/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
	/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	/* hung task detection */
	unsigned long last_switch_count;
#endif
	/* filesystem information */
	struct fs_struct *fs;
	/* open file information */
	struct files_struct *files;
	/* namespaces */
	struct nsproxy *nsproxy;
	/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
	/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
	 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	/* journalling filesystem info */
	void *journal_info;

	/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	/* stack plugging */
	struct blk_plug *plug;
#endif

	/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
Iulia Manda | 44dba3d | 2014-10-31 02:13:31 +0200 | [diff] [blame] | 830 | * numa_faults is an array split into four regions: |
| 831 | * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer |
| 832 | * in this precise order. |
| 833 | * |
| 834 | * faults_memory: Exponentially decaying average of faults on a per-node
| 835 | * basis. Scheduling placement decisions are made based on these |
| 836 | * counts. The values remain static for the duration of a PTE scan. |
| 837 | * faults_cpu: Track the nodes the process was running on when a NUMA |
| 838 | * hinting fault was incurred. |
| 839 | * faults_memory_buffer and faults_cpu_buffer: Record faults per node |
| 840 | * during the current scan window. When the scan completes, the counts |
| 841 | * in faults_memory and faults_cpu decay and these values are copied. |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 842 | */ |
Iulia Manda | 44dba3d | 2014-10-31 02:13:31 +0200 | [diff] [blame] | 843 | unsigned long *numa_faults; |
Mel Gorman | 83e1d2c | 2013-10-07 11:29:27 +0100 | [diff] [blame] | 844 | unsigned long total_numa_faults; |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 845 | |
| 846 | /* |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 847 | * numa_faults_locality tracks if faults recorded during the last |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 848 | * scan window were remote/local or failed to migrate. The task scan |
| 849 | * period is adapted based on the locality of the faults with different |
| 850 | * weights depending on whether they were shared or private faults.
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 851 | */ |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 852 | unsigned long numa_faults_locality[3]; |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 853 | |
Ingo Molnar | b32e86b | 2013-10-07 11:29:30 +0100 | [diff] [blame] | 854 | unsigned long numa_pages_migrated; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 855 | #endif /* CONFIG_NUMA_BALANCING */ |
| 856 | |
Mel Gorman | 72b252a | 2015-09-04 15:47:32 -0700 | [diff] [blame] | 857 | struct tlbflush_unmap_batch tlb_ubc; |
Mel Gorman | 72b252a | 2015-09-04 15:47:32 -0700 | [diff] [blame] | 858 | |
Ingo Molnar | e56d090 | 2006-01-08 01:01:37 -0800 | [diff] [blame] | 859 | struct rcu_head rcu; |
Jens Axboe | b92ce55 | 2006-04-11 13:52:07 +0200 | [diff] [blame] | 860 | |
| 861 | /* |
| 862 | * cache last used pipe for splice |
| 863 | */ |
| 864 | struct pipe_inode_info *splice_pipe; |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 865 | |
| 866 | struct page_frag task_frag; |
| 867 | |
Ingo Molnar | 47913d4 | 2017-02-01 18:00:26 +0100 | [diff] [blame] | 868 | #ifdef CONFIG_TASK_DELAY_ACCT |
| 869 | struct task_delay_info *delays; |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 870 | #endif |
Ingo Molnar | 47913d4 | 2017-02-01 18:00:26 +0100 | [diff] [blame] | 871 | |
Akinobu Mita | f4f154f | 2006-12-08 02:39:47 -0800 | [diff] [blame] | 872 | #ifdef CONFIG_FAULT_INJECTION |
| 873 | int make_it_fail; |
| 874 | #endif |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 875 | /* |
| 876 | * when (nr_dirtied >= nr_dirtied_pause), it's time to call |
| 877 | * balance_dirty_pages() for some dirty throttling pause |
| 878 | */ |
| 879 | int nr_dirtied; |
| 880 | int nr_dirtied_pause; |
Wu Fengguang | 8371235 | 2011-06-11 19:25:42 -0600 | [diff] [blame] | 881 | unsigned long dirty_paused_when; /* start of a write-and-pause period */ |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 882 | |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 883 | #ifdef CONFIG_LATENCYTOP |
| 884 | int latency_record_count; |
| 885 | struct latency_record latency_record[LT_SAVECOUNT]; |
| 886 | #endif |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 887 | /* |
| 888 | * time slack values; these are used to round up poll() and |
| 889 | * select() etc timeout values. These are in nanoseconds. |
| 890 | */ |
John Stultz | da8b44d | 2016-03-17 14:20:51 -0700 | [diff] [blame] | 891 | u64 timer_slack_ns; |
| 892 | u64 default_timer_slack_ns; |
David Miller | f8d570a | 2008-11-06 00:37:40 -0800 | [diff] [blame] | 893 | |
Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 894 | #ifdef CONFIG_KASAN |
| 895 | unsigned int kasan_depth; |
| 896 | #endif |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 897 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
Daniel Mack | 3ad2f3f | 2010-02-03 08:01:28 +0800 | [diff] [blame] | 898 | /* Index of current stored address in ret_stack */ |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 899 | int curr_ret_stack; |
| 900 | /* Stack of return addresses for return function tracing */ |
| 901 | struct ftrace_ret_stack *ret_stack; |
Steven Rostedt | 8aef2d2 | 2009-03-24 01:10:15 -0400 | [diff] [blame] | 902 | /* time stamp for last schedule */ |
| 903 | unsigned long long ftrace_timestamp; |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 904 | /* |
| 905 | * Number of functions that haven't been traced |
| 906 | * because of depth overrun. |
| 907 | */ |
| 908 | atomic_t trace_overrun; |
Frederic Weisbecker | 380c4b1 | 2008-12-06 03:43:41 +0100 | [diff] [blame] | 909 | /* Pause for the tracing */ |
| 910 | atomic_t tracing_graph_pause; |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 911 | #endif |
Steven Rostedt | ea4e2bc | 2008-12-03 15:36:57 -0500 | [diff] [blame] | 912 | #ifdef CONFIG_TRACING |
| 913 | /* state flags for use by tracers */ |
| 914 | unsigned long trace; |
Steven Rostedt | b1cff0a | 2011-05-25 14:27:43 -0400 | [diff] [blame] | 915 | /* bitmask and counter of trace recursion */ |
Steven Rostedt | 261842b | 2009-04-16 21:41:52 -0400 | [diff] [blame] | 916 | unsigned long trace_recursion; |
| 917 | #endif /* CONFIG_TRACING */ |
Dmitry Vyukov | 5c9a875 | 2016-03-22 14:27:30 -0700 | [diff] [blame] | 918 | #ifdef CONFIG_KCOV |
| 919 | /* Coverage collection mode enabled for this task (0 if disabled). */ |
| 920 | enum kcov_mode kcov_mode; |
| 921 | /* Size of the kcov_area. */ |
| 922 | unsigned kcov_size; |
| 923 | /* Buffer for coverage collection. */ |
| 924 | void *kcov_area; |
| 925 | /* kcov descriptor wired with this task or NULL. */
| 926 | struct kcov *kcov; |
| 927 | #endif |
Vladimir Davydov | 6f185c2 | 2014-12-12 16:55:15 -0800 | [diff] [blame] | 928 | #ifdef CONFIG_MEMCG |
Tejun Heo | 626ebc4 | 2015-11-05 18:46:09 -0800 | [diff] [blame] | 929 | struct mem_cgroup *memcg_in_oom; |
| 930 | gfp_t memcg_oom_gfp_mask; |
| 931 | int memcg_oom_order; |
Tejun Heo | b23afb9 | 2015-11-05 18:46:11 -0800 | [diff] [blame] | 932 | |
| 933 | /* number of pages to reclaim on returning to userland */ |
| 934 | unsigned int memcg_nr_pages_over_high; |
KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 935 | #endif |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 936 | #ifdef CONFIG_UPROBES |
| 937 | struct uprobe_task *utask; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 938 | #endif |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 939 | #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) |
| 940 | unsigned int sequential_io; |
| 941 | unsigned int sequential_io_avg; |
| 942 | #endif |
Peter Zijlstra | 8eb23b9 | 2014-09-24 10:18:55 +0200 | [diff] [blame] | 943 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 944 | unsigned long task_state_change; |
| 945 | #endif |
David Hildenbrand | 8bcbde5 | 2015-05-11 17:52:06 +0200 | [diff] [blame] | 946 | int pagefault_disabled; |
Michal Hocko | 0304926 | 2016-03-25 14:20:33 -0700 | [diff] [blame] | 947 | #ifdef CONFIG_MMU |
Vladimir Davydov | 29c696e | 2016-03-25 14:20:39 -0700 | [diff] [blame] | 948 | struct task_struct *oom_reaper_list; |
Michal Hocko | 0304926 | 2016-03-25 14:20:33 -0700 | [diff] [blame] | 949 | #endif |
Andy Lutomirski | ba14a19 | 2016-08-11 02:35:21 -0700 | [diff] [blame] | 950 | #ifdef CONFIG_VMAP_STACK |
| 951 | struct vm_struct *stack_vm_area; |
| 952 | #endif |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 953 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 954 | /* A live task holds one reference. */ |
| 955 | atomic_t stack_refcount; |
| 956 | #endif |
Dave Hansen | 0c8c0f0 | 2015-07-17 12:28:11 +0200 | [diff] [blame] | 957 | /* CPU-specific state of this task */ |
| 958 | struct thread_struct thread; |
| 959 | /* |
| 960 | * WARNING: on x86, 'thread_struct' contains a variable-sized |
| 961 | * structure. It *MUST* be at the end of 'task_struct'. |
| 962 | * |
| 963 | * Do not put anything below here! |
| 964 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | }; |
| 966 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 967 | static inline struct pid *task_pid(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 968 | { |
| 969 | return task->pids[PIDTYPE_PID].pid; |
| 970 | } |
| 971 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 972 | static inline struct pid *task_tgid(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 973 | { |
| 974 | return task->group_leader->pids[PIDTYPE_PID].pid; |
| 975 | } |
| 976 | |
Oleg Nesterov | 6dda81f | 2009-04-02 16:58:35 -0700 | [diff] [blame] | 977 | /* |
| 978 | * Without tasklist or rcu lock it is not safe to dereference |
| 979 | * the result of task_pgrp/task_session even if task == current, |
| 980 | * we can race with another thread doing sys_setsid/sys_setpgid. |
| 981 | */ |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 982 | static inline struct pid *task_pgrp(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 983 | { |
| 984 | return task->group_leader->pids[PIDTYPE_PGID].pid; |
| 985 | } |
| 986 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 987 | static inline struct pid *task_session(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 988 | { |
| 989 | return task->group_leader->pids[PIDTYPE_SID].pid; |
| 990 | } |
| 991 | |
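/*
 * A minimal usage sketch (illustrative, not part of the header): as the
 * comment above says, the struct pid returned by task_pgrp()/task_session()
 * is only stable under tasklist_lock or rcu_read_lock(), so convert it to a
 * numeric id before dropping the lock. 'p' is assumed to be a task pointer
 * the caller already holds a reference on.
 */
static inline void example_show_pgrp_sid(struct task_struct *p)
{
	pid_t pgrp, sid;

	rcu_read_lock();
	pgrp = pid_vnr(task_pgrp(p));	/* id as seen from current's pid namespace */
	sid  = pid_vnr(task_session(p));
	rcu_read_unlock();

	pr_info("%s: pgrp=%d sid=%d\n", p->comm, pgrp, sid);
}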
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 992 | /* |
| 993 | * the helpers to get the task's different pids as they are seen |
| 994 | * from various namespaces |
| 995 | * |
| 996 | * task_xid_nr() : global id, i.e. the id seen from the init namespace; |
Eric W. Biederman | 44c4e1b | 2008-02-08 04:19:15 -0800 | [diff] [blame] | 997 | * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of |
| 998 | * current. |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 999 | * task_xid_nr_ns() : id seen from the ns specified; |
| 1000 | * |
| 1001 | * set_task_vxid() : assigns a virtual id to a task; |
| 1002 | * |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1003 | * see also pid_nr() etc in include/linux/pid.h |
| 1004 | */ |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1005 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, |
| 1006 | struct pid_namespace *ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1007 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1008 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1009 | { |
| 1010 | return tsk->pid; |
| 1011 | } |
| 1012 | |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1013 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, |
| 1014 | struct pid_namespace *ns) |
| 1015 | { |
| 1016 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); |
| 1017 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1018 | |
| 1019 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
| 1020 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1021 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1022 | } |
| 1023 | |
| 1024 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1025 | static inline pid_t task_tgid_nr(struct task_struct *tsk) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1026 | { |
| 1027 | return tsk->tgid; |
| 1028 | } |
| 1029 | |
Pavel Emelyanov | 2f2a3a4 | 2007-10-18 23:40:19 -0700 | [diff] [blame] | 1030 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1031 | |
| 1032 | static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
| 1033 | { |
| 1034 | return pid_vnr(task_tgid(tsk)); |
| 1035 | } |
| 1036 | |
| 1037 | |
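/*
 * Illustrative sketch (not part of the header) of the _nr/_vnr/_nr_ns
 * naming scheme documented above; 'ns' is assumed to be a pid namespace
 * the caller holds a reference on.
 */
static inline void example_report_tgids(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	pr_info("init-ns tgid=%d, tgid as current sees it=%d, tgid in ns=%d\n",
		task_tgid_nr(tsk),		/* global id */
		task_tgid_vnr(tsk),		/* id in current's pid namespace */
		task_tgid_nr_ns(tsk, ns));	/* id in the given namespace */
}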
Richard Guy Briggs | 80e0b6e | 2014-03-16 14:00:19 -0400 | [diff] [blame] | 1038 | static inline int pid_alive(const struct task_struct *p); |
Richard Guy Briggs | ad36d28 | 2013-08-15 18:05:12 -0400 | [diff] [blame] | 1039 | static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) |
| 1040 | { |
| 1041 | pid_t pid = 0; |
| 1042 | |
| 1043 | rcu_read_lock(); |
| 1044 | if (pid_alive(tsk)) |
| 1045 | pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); |
| 1046 | rcu_read_unlock(); |
| 1047 | |
| 1048 | return pid; |
| 1049 | } |
| 1050 | |
| 1051 | static inline pid_t task_ppid_nr(const struct task_struct *tsk) |
| 1052 | { |
| 1053 | return task_ppid_nr_ns(tsk, &init_pid_ns); |
| 1054 | } |
| 1055 | |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1056 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, |
| 1057 | struct pid_namespace *ns) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1058 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1059 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1060 | } |
| 1061 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1062 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
| 1063 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1064 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1065 | } |
| 1066 | |
| 1067 | |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1068 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, |
| 1069 | struct pid_namespace *ns) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1070 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1071 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1072 | } |
| 1073 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1074 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
| 1075 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1076 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1077 | } |
| 1078 | |
Oleg Nesterov | 1b0f7ff | 2009-04-02 16:58:39 -0700 | [diff] [blame] | 1079 | /* obsolete, do not use */ |
| 1080 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
| 1081 | { |
| 1082 | return task_pgrp_nr_ns(tsk, &init_pid_ns); |
| 1083 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1084 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1085 | /** |
| 1086 | * pid_alive - check that a task structure is not stale |
| 1087 | * @p: Task structure to be checked. |
| 1088 | * |
| 1089 | * Test if a process is not yet dead (at most zombie state).
| 1090 | * If pid_alive fails, then pointers within the task structure |
| 1091 | * can be stale and must not be dereferenced. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1092 | * |
| 1093 | * Return: 1 if the process is alive. 0 otherwise. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | */ |
Richard Guy Briggs | ad36d28 | 2013-08-15 18:05:12 -0400 | [diff] [blame] | 1095 | static inline int pid_alive(const struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | { |
Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 1097 | return p->pids[PIDTYPE_PID].pid != NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | } |
| 1099 | |
Sukadev Bhattiprolu | f400e19 | 2006-09-29 02:00:07 -0700 | [diff] [blame] | 1100 | /** |
Sergey Senozhatsky | 570f524 | 2016-01-01 23:03:01 +0900 | [diff] [blame] | 1101 | * is_global_init - check if a task structure is init. Since init |
| 1102 | * is free to have sub-threads, we need to check its tgid.
Henne | 3260259 | 2006-10-06 00:44:01 -0700 | [diff] [blame] | 1103 | * @tsk: Task structure to be checked. |
| 1104 | * |
| 1105 | * Check if a task structure is the first user space task the kernel created. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1106 | * |
| 1107 | * Return: 1 if the task structure is init. 0 otherwise. |
Sukadev Bhattiprolu | f400e19 | 2006-09-29 02:00:07 -0700 | [diff] [blame] | 1108 | */ |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1109 | static inline int is_global_init(struct task_struct *tsk) |
Pavel Emelyanov | b461cc0 | 2007-10-18 23:40:09 -0700 | [diff] [blame] | 1110 | { |
Sergey Senozhatsky | 570f524 | 2016-01-01 23:03:01 +0900 | [diff] [blame] | 1111 | return task_tgid_nr(tsk) == 1; |
Pavel Emelyanov | b461cc0 | 2007-10-18 23:40:09 -0700 | [diff] [blame] | 1112 | } |
Serge E. Hallyn | b460cbc | 2007-10-18 23:39:52 -0700 | [diff] [blame] | 1113 | |
Cedric Le Goater | 9ec5209 | 2006-10-02 02:19:00 -0700 | [diff] [blame] | 1114 | extern struct pid *cad_pid; |
| 1115 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | /* |
| 1117 | * Per process flags |
| 1118 | */ |
Peter Zijlstra | c1de45c | 2016-11-28 23:03:05 -0800 | [diff] [blame] | 1119 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | #define PF_EXITING 0x00000004 /* getting shut down */ |
Alexey Kuznetsov | 778e9a9 | 2007-06-08 13:47:00 -0700 | [diff] [blame] | 1121 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
Laurent Vivier | 94886b8 | 2007-10-15 17:00:19 +0200 | [diff] [blame] | 1122 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
Tejun Heo | 21aa9af | 2010-06-08 21:40:37 +0200 | [diff] [blame] | 1123 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
Andi Kleen | 4db96cf | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1125 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
| 1127 | #define PF_DUMPCORE 0x00000200 /* dumped core */ |
| 1128 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ |
| 1129 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
Vasiliy Kulikov | 72fa599 | 2011-08-08 19:02:04 +0400 | [diff] [blame] | 1130 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1131 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ |
Tejun Heo | 774a122 | 2013-01-15 18:52:51 -0800 | [diff] [blame] | 1132 | #define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ |
| 1134 | #define PF_FROZEN 0x00010000 /* frozen for system suspend */ |
| 1135 | #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ |
| 1136 | #define PF_KSWAPD 0x00040000 /* I am kswapd */ |
Ming Lei | 21caf2f | 2013-02-22 16:34:08 -0800 | [diff] [blame] | 1137 | #define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
Oleg Nesterov | 246bb0b | 2008-07-25 01:47:38 -0700 | [diff] [blame] | 1139 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
Jens Axboe | b31dc66 | 2006-06-13 08:26:10 +0200 | [diff] [blame] | 1140 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ |
| 1141 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
Tejun Heo | 14a40ff | 2013-03-19 13:45:20 -0700 | [diff] [blame] | 1142 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
Andi Kleen | 4db96cf | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1143 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
Thomas Gleixner | 61a8712 | 2006-06-27 02:54:56 -0700 | [diff] [blame] | 1144 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
Tejun Heo | 58a69cb | 2011-02-16 09:25:31 +0100 | [diff] [blame] | 1145 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
Colin Cross | 2b44c4d | 2013-07-24 17:41:33 -0700 | [diff] [blame] | 1146 | #define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | |
| 1148 | /* |
| 1149 | * Only the _current_ task can read/write to tsk->flags, but other |
| 1150 | * tasks can access tsk->flags in readonly mode for example |
| 1151 | * with tsk_used_math (like during threaded core dumping). |
| 1152 | * There is however an exception to this rule during ptrace |
| 1153 | * or during fork: the ptracer task is allowed to write to the |
| 1154 | * child->flags of its traced child (same goes for fork, the parent |
| 1155 | * can write to the child->flags), because we're guaranteed the |
| 1156 | * child is not running and in turn not changing child->flags |
| 1157 | * at the same time the parent does it. |
| 1158 | */ |
| 1159 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) |
| 1160 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) |
| 1161 | #define clear_used_math() clear_stopped_child_used_math(current) |
| 1162 | #define set_used_math() set_stopped_child_used_math(current) |
| 1163 | #define conditional_stopped_child_used_math(condition, child) \ |
| 1164 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) |
| 1165 | #define conditional_used_math(condition) \ |
| 1166 | conditional_stopped_child_used_math(condition, current) |
| 1167 | #define copy_to_stopped_child_used_math(child) \ |
| 1168 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) |
| 1169 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ |
| 1170 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
| 1171 | #define used_math() tsk_used_math(current) |
| 1172 | |
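/*
 * Illustrative sketch (not part of the header): per the access rule above,
 * another task's flags may be read but never written. tsk_used_math() is
 * one such read-only test, used e.g. when deciding whether FPU state needs
 * to be dumped.
 */
static inline bool example_has_user_fpu_state(struct task_struct *p)
{
	if (p->flags & PF_KTHREAD)	/* kernel threads have no user FPU context */
		return false;
	return tsk_used_math(p) != 0;	/* 0 or PF_USED_MATH, never 1 */
}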
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1173 | /* Per-process atomic flags. */ |
Zefan Li | a2b86f7 | 2014-09-25 09:40:17 +0800 | [diff] [blame] | 1174 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
Zefan Li | 2ad654b | 2014-09-25 09:41:02 +0800 | [diff] [blame] | 1175 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ |
| 1176 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ |
Tetsuo Handa | 77ed2c5 | 2016-03-08 20:01:32 +0900 | [diff] [blame] | 1177 | #define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */ |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1178 | |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1179 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1180 | #define TASK_PFA_TEST(name, func) \ |
| 1181 | static inline bool task_##func(struct task_struct *p) \ |
| 1182 | { return test_bit(PFA_##name, &p->atomic_flags); } |
| 1183 | #define TASK_PFA_SET(name, func) \ |
| 1184 | static inline void task_set_##func(struct task_struct *p) \ |
| 1185 | { set_bit(PFA_##name, &p->atomic_flags); } |
| 1186 | #define TASK_PFA_CLEAR(name, func) \ |
| 1187 | static inline void task_clear_##func(struct task_struct *p) \ |
| 1188 | { clear_bit(PFA_##name, &p->atomic_flags); } |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1189 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1190 | TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) |
| 1191 | TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1192 | |
Zefan Li | 2ad654b | 2014-09-25 09:41:02 +0800 | [diff] [blame] | 1193 | TASK_PFA_TEST(SPREAD_PAGE, spread_page) |
| 1194 | TASK_PFA_SET(SPREAD_PAGE, spread_page) |
| 1195 | TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) |
| 1196 | |
| 1197 | TASK_PFA_TEST(SPREAD_SLAB, spread_slab) |
| 1198 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) |
| 1199 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) |
Tejun Heo | 544b2c9 | 2011-06-14 11:20:18 +0200 | [diff] [blame] | 1200 | |
Tetsuo Handa | 77ed2c5 | 2016-03-08 20:01:32 +0900 | [diff] [blame] | 1201 | TASK_PFA_TEST(LMK_WAITING, lmk_waiting) |
| 1202 | TASK_PFA_SET(LMK_WAITING, lmk_waiting) |
| 1203 | |
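/*
 * The TASK_PFA_* macros above expand into inline helpers such as
 * task_no_new_privs(), task_set_no_new_privs() and task_spread_page().
 * A hedged sketch (not part of the header) of how such a flag is typically
 * consulted; note that NO_NEW_PRIVS deliberately has no _CLEAR helper, so
 * once set it stays set.
 */
static inline bool example_may_gain_privs(struct task_struct *p)
{
	return !task_no_new_privs(p);
}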
Mel Gorman | 907aed4 | 2012-07-31 16:44:07 -0700 | [diff] [blame] | 1204 | static inline void tsk_restore_flags(struct task_struct *task, |
| 1205 | unsigned long orig_flags, unsigned long flags) |
| 1206 | { |
| 1207 | task->flags &= ~flags; |
| 1208 | task->flags |= orig_flags & flags; |
| 1209 | } |
| 1210 | |
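/*
 * Illustrative sketch (not part of the header): the usual save/restore
 * pattern around tsk_restore_flags(), here forcing NOIO allocation
 * behaviour for a region of code, similar in spirit to
 * memalloc_noio_save()/memalloc_noio_restore().
 */
static inline void example_noio_region(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC_NOIO;
	/* ... allocations in here implicitly behave as GFP_NOIO ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC_NOIO);
}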
Juri Lelli | f82f804 | 2014-10-07 09:52:11 +0100 | [diff] [blame] | 1211 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, |
| 1212 | const struct cpumask *trial); |
Juri Lelli | 7f51412 | 2014-09-19 10:22:40 +0100 | [diff] [blame] | 1213 | extern int task_can_attach(struct task_struct *p, |
| 1214 | const struct cpumask *cs_cpus_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | #ifdef CONFIG_SMP |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 1216 | extern void do_set_cpus_allowed(struct task_struct *p, |
| 1217 | const struct cpumask *new_mask); |
| 1218 | |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 1219 | extern int set_cpus_allowed_ptr(struct task_struct *p, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1220 | const struct cpumask *new_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | #else |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 1222 | static inline void do_set_cpus_allowed(struct task_struct *p, |
| 1223 | const struct cpumask *new_mask) |
| 1224 | { |
| 1225 | } |
Mike Travis | cd8ba7c | 2008-03-26 14:23:49 -0700 | [diff] [blame] | 1226 | static inline int set_cpus_allowed_ptr(struct task_struct *p, |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1227 | const struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | { |
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1229 | if (!cpumask_test_cpu(0, new_mask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | return -EINVAL; |
| 1231 | return 0; |
| 1232 | } |
| 1233 | #endif |
Rusty Russell | e0ad955 | 2009-09-24 09:34:38 -0600 | [diff] [blame] | 1234 | |
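/*
 * Illustrative sketch (not part of the header): pinning a task to a single
 * CPU with set_cpus_allowed_ptr(); cpumask_of() supplies the one-CPU mask.
 * The return value must be checked, since the call can fail (e.g. -EINVAL)
 * if the requested mask contains no usable CPU.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}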
Christian Borntraeger | 6d0d287 | 2016-11-16 13:23:05 +0100 | [diff] [blame] | 1235 | #ifndef cpu_relax_yield |
| 1236 | #define cpu_relax_yield() cpu_relax() |
| 1237 | #endif |
| 1238 | |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 1239 | extern int yield_to(struct task_struct *p, bool preempt); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1240 | extern void set_user_nice(struct task_struct *p, long nice); |
| 1241 | extern int task_prio(const struct task_struct *p); |
Dongsheng Yang | d0ea026 | 2014-01-27 22:00:45 -0500 | [diff] [blame] | 1242 | /** |
| 1243 | * task_nice - return the nice value of a given task. |
| 1244 | * @p: the task in question. |
| 1245 | * |
| 1246 | * Return: The nice value [ -20 ... 0 ... 19 ]. |
| 1247 | */ |
| 1248 | static inline int task_nice(const struct task_struct *p) |
| 1249 | { |
| 1250 | return PRIO_TO_NICE((p)->static_prio); |
| 1251 | } |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1252 | extern int can_nice(const struct task_struct *p, const int nice); |
| 1253 | extern int task_curr(const struct task_struct *p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 | extern int idle_cpu(int cpu); |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 1255 | extern int sched_setscheduler(struct task_struct *, int, |
| 1256 | const struct sched_param *); |
Rusty Russell | 961ccdd | 2008-06-23 13:55:38 +1000 | [diff] [blame] | 1257 | extern int sched_setscheduler_nocheck(struct task_struct *, int, |
KOSAKI Motohiro | fe7de49 | 2010-10-20 16:01:12 -0700 | [diff] [blame] | 1258 | const struct sched_param *); |
Dario Faggioli | d50dde5 | 2013-11-07 14:43:36 +0100 | [diff] [blame] | 1259 | extern int sched_setattr(struct task_struct *, |
| 1260 | const struct sched_attr *); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1261 | extern struct task_struct *idle_task(int cpu); |
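/*
 * Illustrative sketch (not part of the header): renicing a task with the
 * in-kernel API declared above. can_nice() applies the RLIMIT_NICE /
 * CAP_SYS_NICE policy check; set_user_nice() performs the change.
 */
static inline int example_renice(struct task_struct *p, long nice)
{
	if (!can_nice(p, nice))
		return -EPERM;
	set_user_nice(p, nice);
	return 0;
}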
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1262 | /** |
| 1263 | * is_idle_task - is the specified task an idle task? |
Randy Dunlap | fa75728 | 2012-01-21 11:03:13 -0800 | [diff] [blame] | 1264 | * @p: the task in question. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1265 | * |
| 1266 | * Return: 1 if @p is an idle task. 0 otherwise. |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1267 | */ |
Paul E. McKenney | 7061ca3 | 2011-12-20 08:20:46 -0800 | [diff] [blame] | 1268 | static inline bool is_idle_task(const struct task_struct *p) |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1269 | { |
Peter Zijlstra | c1de45c | 2016-11-28 23:03:05 -0800 | [diff] [blame] | 1270 | return !!(p->flags & PF_IDLE); |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1271 | } |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1272 | extern struct task_struct *curr_task(int cpu); |
Peter Zijlstra | a458ae2 | 2016-09-20 20:29:40 +0200 | [diff] [blame] | 1273 | extern void ia64_set_curr_task(int cpu, struct task_struct *p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | |
| 1275 | void yield(void); |
| 1276 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1277 | union thread_union { |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1278 | #ifndef CONFIG_THREAD_INFO_IN_TASK |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | struct thread_info thread_info; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1280 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | unsigned long stack[THREAD_SIZE/sizeof(long)]; |
| 1282 | }; |
| 1283 | |
Ingo Molnar | f3ac606 | 2017-02-03 22:59:33 +0100 | [diff] [blame] | 1284 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1285 | static inline struct thread_info *task_thread_info(struct task_struct *task) |
| 1286 | { |
| 1287 | return &task->thread_info; |
| 1288 | } |
| 1289 | #elif !defined(__HAVE_THREAD_FUNCTIONS) |
| 1290 | # define task_thread_info(task) ((struct thread_info *)(task)->stack) |
| 1291 | #endif |
| 1292 | |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1293 | /* |
| 1294 | * find a task by one of its numerical ids |
| 1295 | * |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1296 | * find_task_by_pid_ns(): |
| 1297 | * finds a task by its pid in the specified namespace |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1298 | * find_task_by_vpid(): |
| 1299 | * finds a task by its virtual pid |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1300 | * |
Pavel Emelyanov | e49859e | 2008-07-25 01:48:36 -0700 | [diff] [blame] | 1301 | * see also find_vpid() etc in include/linux/pid.h |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1302 | */ |
| 1303 | |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1304 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
| 1305 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, |
| 1306 | struct pid_namespace *ns); |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1307 | |
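/*
 * Illustrative sketch (not part of the header): the common lookup pattern.
 * The task found is only guaranteed to stay valid inside the RCU read
 * section unless a reference is taken with get_task_struct() (declared
 * elsewhere in this header).
 */
static inline struct task_struct *example_get_task_by_vpid(pid_t vpid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(vpid);
	if (p)
		get_task_struct(p);	/* caller must put_task_struct() when done */
	rcu_read_unlock();

	return p;
}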
Harvey Harrison | b3c9752 | 2008-02-13 15:03:15 -0800 | [diff] [blame] | 1308 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
| 1309 | extern int wake_up_process(struct task_struct *tsk); |
Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 1310 | extern void wake_up_new_task(struct task_struct *tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 | #ifdef CONFIG_SMP |
| 1312 | extern void kick_process(struct task_struct *tsk); |
| 1313 | #else |
| 1314 | static inline void kick_process(struct task_struct *tsk) { } |
| 1315 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1316 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1317 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); |
| 1318 | static inline void set_task_comm(struct task_struct *tsk, const char *from) |
| 1319 | { |
| 1320 | __set_task_comm(tsk, from, false); |
| 1321 | } |
Andrew Morton | 59714d6 | 2008-02-04 22:27:21 -0800 | [diff] [blame] | 1322 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | |
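/*
 * Illustrative sketch (not part of the header): get_task_comm() copies the
 * name under the task lock, so callers only need a TASK_COMM_LEN buffer
 * rather than any extra locking of their own.
 */
static inline void example_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_debug("acting on behalf of %s\n", comm);
}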
| 1324 | #ifdef CONFIG_SMP |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 1325 | void scheduler_ipi(void); |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 1326 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | #else |
Peter Zijlstra | 184748c | 2011-04-05 17:23:39 +0200 | [diff] [blame] | 1328 | static inline void scheduler_ipi(void) { } |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 1329 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
| 1330 | long match_state) |
| 1331 | { |
| 1332 | return 1; |
| 1333 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | #endif |
| 1335 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | /* set thread flags in other tasks' structures
| 1337 | * - see asm/thread_info.h for TIF_xxxx flags available |
| 1338 | */ |
| 1339 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1340 | { |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1341 | set_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1342 | } |
| 1343 | |
| 1344 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1345 | { |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1346 | clear_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | } |
| 1348 | |
| 1349 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1350 | { |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1351 | return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1352 | } |
| 1353 | |
| 1354 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1355 | { |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1356 | return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | } |
| 1358 | |
| 1359 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1360 | { |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1361 | return test_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | } |
| 1363 | |
| 1364 | static inline void set_tsk_need_resched(struct task_struct *tsk) |
| 1365 | { |
| 1366 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); |
| 1367 | } |
| 1368 | |
| 1369 | static inline void clear_tsk_need_resched(struct task_struct *tsk) |
| 1370 | { |
| 1371 | clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); |
| 1372 | } |
| 1373 | |
Gregory Haskins | 8ae121a | 2008-04-23 07:13:29 -0400 | [diff] [blame] | 1374 | static inline int test_tsk_need_resched(struct task_struct *tsk) |
| 1375 | { |
| 1376 | return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); |
| 1377 | } |
| 1378 | |
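/*
 * Illustrative sketch (not part of the header): signal delivery is a
 * typical user of these wrappers, setting TIF_SIGPENDING on the target and
 * then kicking it so a remotely running task notices the flag.
 */
static inline void example_mark_sigpending(struct task_struct *t)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	kick_process(t);
}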
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | /* |
| 1380 | * cond_resched() and cond_resched_lock(): latency reduction via |
| 1381 | * explicit rescheduling in places that are safe. The return |
| 1382 | * value indicates whether a reschedule was done in fact. |
| 1383 | * cond_resched_lock() will drop the spinlock before scheduling, |
| 1384 | * cond_resched_softirq() will enable bhs before scheduling. |
| 1385 | */ |
Peter Zijlstra | 35a773a | 2016-09-19 12:57:53 +0200 | [diff] [blame] | 1386 | #ifndef CONFIG_PREEMPT |
Linus Torvalds | c3921ab | 2008-05-11 16:04:48 -0700 | [diff] [blame] | 1387 | extern int _cond_resched(void); |
Peter Zijlstra | 35a773a | 2016-09-19 12:57:53 +0200 | [diff] [blame] | 1388 | #else |
| 1389 | static inline int _cond_resched(void) { return 0; } |
| 1390 | #endif |
Frederic Weisbecker | 6f80bd9 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1391 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1392 | #define cond_resched() ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1393 | ___might_sleep(__FILE__, __LINE__, 0); \ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1394 | _cond_resched(); \ |
| 1395 | }) |
Frederic Weisbecker | 6f80bd9 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1396 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1397 | extern int __cond_resched_lock(spinlock_t *lock); |
| 1398 | |
| 1399 | #define cond_resched_lock(lock) ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1400 | ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1401 | __cond_resched_lock(lock); \ |
| 1402 | }) |
| 1403 | |
| 1404 | extern int __cond_resched_softirq(void); |
| 1405 | |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 1406 | #define cond_resched_softirq() ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1407 | ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 1408 | __cond_resched_softirq(); \ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1409 | }) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 | |
Simon Horman | f6f3c43 | 2013-05-22 14:50:31 +0900 | [diff] [blame] | 1411 | static inline void cond_resched_rcu(void) |
| 1412 | { |
| 1413 | #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) |
| 1414 | rcu_read_unlock(); |
| 1415 | cond_resched(); |
| 1416 | rcu_read_lock(); |
| 1417 | #endif |
| 1418 | } |
| 1419 | |
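/*
 * Illustrative sketch (not part of the header): a long-running loop should
 * offer to reschedule on every iteration; process_one() is a hypothetical
 * stand-in for the real per-item work.
 */
static inline void example_process_many(unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		/* process_one(i); */
		cond_resched();
	}
}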
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | /* |
| 1421 | * Does a critical section need to be broken due to another |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1422 | * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
| 1423 | * but reflects a general need for low latency.)
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 | */ |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1425 | static inline int spin_needbreak(spinlock_t *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | { |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1427 | #ifdef CONFIG_PREEMPT |
| 1428 | return spin_is_contended(lock); |
| 1429 | #else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 | return 0; |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1431 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1432 | } |
| 1433 | |
Peter Zijlstra | 75f93fe | 2013-09-27 17:30:03 +0200 | [diff] [blame] | 1434 | static __always_inline bool need_resched(void) |
| 1435 | { |
| 1436 | return unlikely(tif_need_resched()); |
| 1437 | } |
| 1438 | |
Thomas Gleixner | ee761f6 | 2013-03-21 22:49:32 +0100 | [diff] [blame] | 1439 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1440 | * Wrappers for p->thread_info->cpu access. No-op on UP. |
| 1441 | */ |
| 1442 | #ifdef CONFIG_SMP |
| 1443 | |
| 1444 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1445 | { |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1446 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1447 | return p->cpu; |
| 1448 | #else |
Al Viro | a1261f5 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1449 | return task_thread_info(p)->cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1450 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | } |
| 1452 | |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1453 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | |
| 1455 | #else |
| 1456 | |
| 1457 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1458 | { |
| 1459 | return 0; |
| 1460 | } |
| 1461 | |
| 1462 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1463 | { |
| 1464 | } |
| 1465 | |
| 1466 | #endif /* CONFIG_SMP */ |
| 1467 | |
Pan Xinhui | d9345c6 | 2016-11-02 05:08:28 -0400 | [diff] [blame] | 1468 | /* |
| 1469 | * In order to reduce various lock holder preemption latencies, provide an
| 1470 | * interface to see if a vCPU is currently running or not. |
| 1471 | * |
| 1472 | * This allows us to terminate optimistic spin loops and block, analogous to |
| 1473 | * the native optimistic spin heuristic of testing if the lock owner task is |
| 1474 | * running or not. |
| 1475 | */ |
| 1476 | #ifndef vcpu_is_preempted |
| 1477 | # define vcpu_is_preempted(cpu) false |
| 1478 | #endif |
| 1479 | |
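/*
 * Illustrative sketch (not part of the header): an optimistic spin loop in
 * the style described above. 'owner' and 'done' are assumed to be supplied
 * by the caller; spinning is abandoned once the owner's vCPU is preempted,
 * since waiting any longer would only burn cycles.
 */
static inline bool example_spin_on_owner(struct task_struct *owner, bool *done)
{
	while (!READ_ONCE(*done)) {
		if (vcpu_is_preempted(task_cpu(owner)))
			return false;	/* owner not running; sleep instead */
		cpu_relax();
	}
	return true;
}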
Rusty Russell | 96f874e | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1480 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
| 1481 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 1482 | |
Dave Hansen | 8245525 | 2008-02-04 22:28:59 -0800 | [diff] [blame] | 1483 | #ifndef TASK_SIZE_OF |
| 1484 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
| 1485 | #endif |
| 1486 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | #endif |