#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/current.h>

/* task_struct member predeclarations: */
struct audit_context;
struct autogroup;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct filename;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct uts_namespace;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

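/*
 * Usage sketch (illustrative only, not part of the original header): the
 * task_is_*() helpers above are meant to replace open-coded state tests,
 * e.g. a ptrace-style check might read:
 *
 *	if (task_is_stopped_or_traced(child))
 *		wait_for_debugger(child);
 *
 * where wait_for_debugger() is a hypothetical caller-provided helper.
 */
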
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

/* Task command name length */
#define TASK_COMM_LEN 16

extern void sched_init(void);
extern void sched_init_smp(void);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
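/*
 * Usage sketch (illustrative only): the schedule_timeout_*() variants set
 * the corresponding task state before calling schedule_timeout(), so
 * sleeping for roughly one second in an interruptible fashion is simply:
 *
 *	schedule_timeout_interruptible(HZ);
 *
 * A plain schedule_timeout() requires the caller to have set the task
 * state itself, e.g. via set_current_state(TASK_UNINTERRUPTIBLE).
 */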
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}
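/*
 * Usage sketch (illustrative only): a caller-owned instance can be
 * initialized and updated without locking, e.g.:
 *
 *	struct task_cputime_atomic ct = INIT_CPUTIME_ATOMIC;
 *	atomic64_add(delta_ns, &ct.sum_exec_runtime);
 *
 * where delta_ns is a hypothetical nanosecond increment.
 */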

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
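/*
 * Worked example (derived from the definitions above): with
 * CONFIG_PREEMPT_COUNT enabled, PREEMPT_DISABLE_OFFSET is 1 and
 * PREEMPT_ENABLED is 0, so a freshly forked task starts with
 * preempt_count() == 2; both counts are dropped as the new task
 * finishes its first context switch (see finish_task_switch()).
 */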

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

#include <linux/rwsem.h>

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
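/*
 * Worked example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10, the
 * value 1.0 is represented as 1024 (SCHED_FIXEDPOINT_SCALE) and 0.5 as
 * 512, and a fixed-point product of two such values is computed as:
 *
 *	(a * b) >> SCHED_FIXEDPOINT_SHIFT
 */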

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulate an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
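/*
 * Worked example (follows from the [load_avg definition] above): an
 * entity with scale_load_down(load) == 1024 that has been runnable half
 * of the time converges to load_avg ~= 512; a task running 25% of the
 * time on a full-capacity CPU has util_avg ~= 256 out of
 * SCHED_CAPACITY_SCALE (1024).
 */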

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg	avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to PI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};
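/*
 * Usage sketch (illustrative only): the union lets RCU test or clear all
 * of the above flags in a single access via the .s view while still
 * naming them individually via .b, e.g.:
 *
 *	if (READ_ONCE(t->rcu_read_unlock_special.s))
 *		...at least one special condition is pending...
 */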

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;
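	/*
	 * Layout sketch (illustrative): each of the four regions above holds
	 * per-node counters, further split into private vs shared faults; the
	 * exact index arithmetic lives in task_faults_idx() in
	 * kernel/sched/fair.c.
	 */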

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults.
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned kcov_size;
	/* Buffer for coverage collection. */
	void *kcov_area;
	/* kcov descriptor wired with this task or NULL. */
	struct kcov *kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* number of pages to reclaim on returning to userland */
	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference. */
	atomic_t stack_refcount;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure. It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};
1094
Alexey Dobriyane8681712007-10-26 12:17:22 +04001095static inline struct pid *task_pid(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001096{
1097 return task->pids[PIDTYPE_PID].pid;
1098}
1099
Alexey Dobriyane8681712007-10-26 12:17:22 +04001100static inline struct pid *task_tgid(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001101{
1102 return task->group_leader->pids[PIDTYPE_PID].pid;
1103}
1104
Oleg Nesterov6dda81f2009-04-02 16:58:35 -07001105/*
1106 * Without tasklist or rcu lock it is not safe to dereference
1107 * the result of task_pgrp/task_session even if task == current;
1108 * we can race with another thread doing sys_setsid/sys_setpgid.
1109 */
Alexey Dobriyane8681712007-10-26 12:17:22 +04001110static inline struct pid *task_pgrp(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001111{
1112 return task->group_leader->pids[PIDTYPE_PGID].pid;
1113}
1114
Alexey Dobriyane8681712007-10-26 12:17:22 +04001115static inline struct pid *task_session(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001116{
1117 return task->group_leader->pids[PIDTYPE_SID].pid;
1118}
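/*
 * A minimal usage sketch: hold rcu_read_lock() (or tasklist_lock) across
 * both the call and the last use of the returned struct pid, otherwise a
 * concurrent sys_setsid()/sys_setpgid() can free it under us:
 *
 *	rcu_read_lock();
 *	pgrp_nr = pid_nr(task_pgrp(current));
 *	rcu_read_unlock();
 */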
1119
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001120/*
1121 * The helpers to get the task's different pids as they are seen
1122 * from various namespaces
1123 *
1124 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
Eric W. Biederman44c4e1b2008-02-08 04:19:15 -08001125 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1126 * current.
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001127 * task_xid_nr_ns() : id seen from the ns specified;
1128 *
1129 * set_task_vxid() : assigns a virtual id to a task;
1130 *
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001131 * see also pid_nr() etc in include/linux/pid.h
1132 */
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001133pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1134 struct pid_namespace *ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001135
Alexey Dobriyane8681712007-10-26 12:17:22 +04001136static inline pid_t task_pid_nr(struct task_struct *tsk)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001137{
1138 return tsk->pid;
1139}
1140
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001141static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1142 struct pid_namespace *ns)
1143{
1144 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1145}
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001146
1147static inline pid_t task_pid_vnr(struct task_struct *tsk)
1148{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001149 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001150}
1151
1152
Alexey Dobriyane8681712007-10-26 12:17:22 +04001153static inline pid_t task_tgid_nr(struct task_struct *tsk)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001154{
1155 return tsk->tgid;
1156}
1157
Pavel Emelyanov2f2a3a42007-10-18 23:40:19 -07001158pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001159
1160static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1161{
1162 return pid_vnr(task_tgid(tsk));
1163}
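/*
 * A hypothetical caller, showing the _nr_ns() flavour: report a tgid as it
 * appears in the pid namespace of the observing task (this assumes
 * task_active_pid_ns() from <linux/pid_namespace.h>):
 *
 *	struct pid_namespace *ns = task_active_pid_ns(current);
 *	pid_t shown = task_tgid_nr_ns(tsk, ns);
 */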
1164
1165
Richard Guy Briggs80e0b6e2014-03-16 14:00:19 -04001166static inline int pid_alive(const struct task_struct *p);
Richard Guy Briggsad36d282013-08-15 18:05:12 -04001167static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1168{
1169 pid_t pid = 0;
1170
1171 rcu_read_lock();
1172 if (pid_alive(tsk))
1173 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1174 rcu_read_unlock();
1175
1176 return pid;
1177}
1178
1179static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1180{
1181 return task_ppid_nr_ns(tsk, &init_pid_ns);
1182}
1183
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001184static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1185 struct pid_namespace *ns)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001186{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001187 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001188}
1189
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001190static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1191{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001192 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001193}
1194
1195
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001196static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1197 struct pid_namespace *ns)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001198{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001199 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001200}
1201
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001202static inline pid_t task_session_vnr(struct task_struct *tsk)
1203{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001204 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001205}
1206
Oleg Nesterov1b0f7ff2009-04-02 16:58:39 -07001207/* obsolete, do not use */
1208static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1209{
1210 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1211}
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001212
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213/**
1214 * pid_alive - check that a task structure is not stale
1215 * @p: Task structure to be checked.
1216 *
1217 * Test if a process is not yet dead (at most zombie state).
1218 * If pid_alive fails, then pointers within the task structure
1219 * can be stale and must not be dereferenced.
Yacine Belkadie69f6182013-07-12 20:45:47 +02001220 *
1221 * Return: 1 if the process is alive. 0 otherwise.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 */
Richard Guy Briggsad36d282013-08-15 18:05:12 -04001223static inline int pid_alive(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224{
Eric W. Biederman92476d72006-03-31 02:31:42 -08001225 return p->pids[PIDTYPE_PID].pid != NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226}
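/*
 * Usage sketch, mirroring task_ppid_nr_ns() above: check pid_alive() under
 * rcu_read_lock() before chasing pid-related pointers:
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		ppid = task_tgid_nr(rcu_dereference(p->real_parent));
 *	rcu_read_unlock();
 */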
1227
Sukadev Bhattiproluf400e192006-09-29 02:00:07 -07001228/**
Sergey Senozhatsky570f5242016-01-01 23:03:01 +09001229 * is_global_init - check if a task structure is init.
Henne32602592006-10-06 00:44:01 -07001230 * @tsk: Task structure to be checked.
1231 *
1232 * Check if a task structure is the first user space task the kernel created.
1233 * Since init is free to have sub-threads we need to check tgid.
Yacine Belkadie69f6182013-07-12 20:45:47 +02001234 *
1235 * Return: 1 if the task structure is init. 0 otherwise.
Sukadev Bhattiproluf400e192006-09-29 02:00:07 -07001236 */
Alexey Dobriyane8681712007-10-26 12:17:22 +04001237static inline int is_global_init(struct task_struct *tsk)
Pavel Emelyanovb461cc02007-10-18 23:40:09 -07001238{
Sergey Senozhatsky570f5242016-01-01 23:03:01 +09001239 return task_tgid_nr(tsk) == 1;
Pavel Emelyanovb461cc02007-10-18 23:40:09 -07001240}
Serge E. Hallynb460cbc2007-10-18 23:39:52 -07001241
Cedric Le Goater9ec52092006-10-02 02:19:00 -07001242extern struct pid *cad_pid;
1243
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244extern void free_task(struct task_struct *tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
Ingo Molnare56d0902006-01-08 01:01:37 -08001246
Andrew Morton158d9eb2006-03-31 02:31:34 -08001247extern void __put_task_struct(struct task_struct *t);
Ingo Molnare56d0902006-01-08 01:01:37 -08001248
1249static inline void put_task_struct(struct task_struct *t)
1250{
1251 if (atomic_dec_and_test(&t->usage))
Eric W. Biederman8c7904a2006-03-31 02:31:37 -08001252 __put_task_struct(t);
Ingo Molnare56d0902006-01-08 01:01:37 -08001253}
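/*
 * Typical reference pattern (illustrative): pin the task while its pointer
 * must stay valid, then drop the reference, which may free the task via
 * __put_task_struct():
 *
 *	get_task_struct(tsk);
 *	...
 *	put_task_struct(tsk);
 */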
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Oleg Nesterov150593b2016-05-18 19:02:18 +02001255struct task_struct *task_rcu_dereference(struct task_struct **ptask);
1256struct task_struct *try_get_task_struct(struct task_struct **ptask);
1257
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001258#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1259extern void task_cputime(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001260 u64 *utime, u64 *stime);
Frederic Weisbecker16a6d9b2017-01-31 04:09:21 +01001261extern u64 task_gtime(struct task_struct *t);
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001262#else
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001263static inline void task_cputime(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001264 u64 *utime, u64 *stime)
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001265{
Stanislaw Gruszka353c50e2016-11-15 03:06:52 +01001266 *utime = t->utime;
1267 *stime = t->stime;
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001268}
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001269
Frederic Weisbecker16a6d9b2017-01-31 04:09:21 +01001270static inline u64 task_gtime(struct task_struct *t)
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001271{
1272 return t->gtime;
1273}
1274#endif
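/*
 * Both branches above expose the same API, so a caller sketch is simply:
 *
 *	u64 utime, stime;
 *
 *	task_cputime(p, &utime, &stime);
 *	total_ns = utime + stime;
 *
 * (values are in nanoseconds with the u64 cputime representation).
 */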
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001275
1276#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
Jens Axboeb31dc662006-06-13 08:26:10 +02001277static inline void task_cputime_scaled(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001278 u64 *utimescaled,
1279 u64 *stimescaled)
Jens Axboeb31dc662006-06-13 08:26:10 +02001280{
Stanislaw Gruszka353c50e2016-11-15 03:06:52 +01001281 *utimescaled = t->utimescaled;
1282 *stimescaled = t->stimescaled;
Tejun Heo58a69cb2011-02-16 09:25:31 +01001283}
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001284#else
1285static inline void task_cputime_scaled(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001286 u64 *utimescaled,
1287 u64 *stimescaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288{
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001289 task_cputime(t, utimescaled, stimescaled);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290}
1291#endif
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001292
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001293extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
1294extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295
1296/*
1297 * Per process flags
1298 */
Peter Zijlstrac1de45c2016-11-28 23:03:05 -08001299#define PF_IDLE 0x00000002 /* I am an IDLE thread */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300#define PF_EXITING 0x00000004 /* getting shut down */
1301#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1302#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1303#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1304#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1305#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1306#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1307#define PF_DUMPCORE 0x00000200 /* dumped core */
1308#define PF_SIGNALED 0x00000400 /* killed by a signal */
1309#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1310#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1311#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1312#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1313#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1314#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1315#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1316#define PF_KSWAPD 0x00040000 /* I am kswapd */
Ming Lei21caf2f2013-02-22 16:34:08 -08001317#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1319#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1320#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1321#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
Tejun Heo14a40ff2013-03-19 13:45:20 -07001322#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1325#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
Colin Cross2b44c4d2013-07-24 17:41:33 -07001326#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
1328/*
1329 * Only the _current_ task can read/write to tsk->flags, but other
1330 * tasks can access tsk->flags in readonly mode for example
1331 * with tsk_used_math (like during threaded core dumping).
1332 * There is however an exception to this rule during ptrace
1333 * or during fork: the ptracer task is allowed to write to the
1334 * child->flags of its traced child (same goes for fork, the parent
1335 * can write to the child->flags), because we're guaranteed the
1336 * child is not running and in turn not changing child->flags
1337 * at the same time the parent does it.
1338 */
1339#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1340#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1341#define clear_used_math() clear_stopped_child_used_math(current)
1342#define set_used_math() set_stopped_child_used_math(current)
1343#define conditional_stopped_child_used_math(condition, child) \
1344 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1345#define conditional_used_math(condition) \
1346 conditional_stopped_child_used_math(condition, current)
1347#define copy_to_stopped_child_used_math(child) \
1348 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1349/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1350#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1351#define used_math() tsk_used_math(current)
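/*
 * Illustrative application of the rule above: read-only tests on another
 * task's flags are fine, but writes must come from current itself:
 *
 *	if (p->flags & PF_KTHREAD)
 *		return;
 *	current->flags |= PF_MEMALLOC;
 */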
1352
Kees Cook1d4457f2014-05-21 15:23:46 -07001353/* Per-process atomic flags. */
Zefan Lia2b86f72014-09-25 09:40:17 +08001354#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
Zefan Li2ad654b2014-09-25 09:41:02 +08001355#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1356#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
Tetsuo Handa77ed2c52016-03-08 20:01:32 +09001357#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
Kees Cook1d4457f2014-05-21 15:23:46 -07001358
Kees Cook1d4457f2014-05-21 15:23:46 -07001359
Zefan Lie0e50702014-09-25 09:40:40 +08001360#define TASK_PFA_TEST(name, func) \
1361 static inline bool task_##func(struct task_struct *p) \
1362 { return test_bit(PFA_##name, &p->atomic_flags); }
1363#define TASK_PFA_SET(name, func) \
1364 static inline void task_set_##func(struct task_struct *p) \
1365 { set_bit(PFA_##name, &p->atomic_flags); }
1366#define TASK_PFA_CLEAR(name, func) \
1367 static inline void task_clear_##func(struct task_struct *p) \
1368 { clear_bit(PFA_##name, &p->atomic_flags); }
Kees Cook1d4457f2014-05-21 15:23:46 -07001369
Zefan Lie0e50702014-09-25 09:40:40 +08001370TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1371TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
Kees Cook1d4457f2014-05-21 15:23:46 -07001372
Zefan Li2ad654b2014-09-25 09:41:02 +08001373TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1374TASK_PFA_SET(SPREAD_PAGE, spread_page)
1375TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1376
1377TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1378TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1379TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
Tejun Heo544b2c92011-06-14 11:20:18 +02001380
Tetsuo Handa77ed2c52016-03-08 20:01:32 +09001381TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
1382TASK_PFA_SET(LMK_WAITING, lmk_waiting)
1383
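/*
 * Expansion sketch: TASK_PFA_TEST(SPREAD_PAGE, spread_page) above generates
 *
 *	static inline bool task_spread_page(struct task_struct *p)
 *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *
 * so callers use task_spread_page(p)/task_set_spread_page(p)/
 * task_clear_spread_page(p) and never touch the bit numbers directly.
 */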
Mel Gorman907aed42012-07-31 16:44:07 -07001384static inline void tsk_restore_flags(struct task_struct *task,
1385 unsigned long orig_flags, unsigned long flags)
1386{
1387 task->flags &= ~flags;
1388 task->flags |= orig_flags & flags;
1389}
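/*
 * Usage sketch (assumed, modelled on in-tree memalloc_noio users): save the
 * bits you are about to modify, set them, then restore only those bits:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC_NOIO);
 */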
1390
Juri Lellif82f8042014-10-07 09:52:11 +01001391extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
1392 const struct cpumask *trial);
Juri Lelli7f514122014-09-19 10:22:40 +01001393extern int task_can_attach(struct task_struct *p,
1394 const struct cpumask *cs_cpus_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395#ifdef CONFIG_SMP
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09001396extern void do_set_cpus_allowed(struct task_struct *p,
1397 const struct cpumask *new_mask);
1398
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001399extern int set_cpus_allowed_ptr(struct task_struct *p,
Rusty Russell96f874e2008-11-25 02:35:14 +10301400 const struct cpumask *new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401#else
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09001402static inline void do_set_cpus_allowed(struct task_struct *p,
1403 const struct cpumask *new_mask)
1404{
1405}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001406static inline int set_cpus_allowed_ptr(struct task_struct *p,
Rusty Russell96f874e2008-11-25 02:35:14 +10301407 const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408{
Rusty Russell96f874e2008-11-25 02:35:14 +10301409 if (!cpumask_test_cpu(0, new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 return -EINVAL;
1411 return 0;
1412}
1413#endif
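/*
 * A hypothetical caller: pin a task to CPU 2 with the cpumask API from
 * <linux/cpumask.h>; set_cpus_allowed_ptr() returns 0 on success:
 *
 *	cpumask_t mask;
 *
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(2, &mask);
 *	if (set_cpus_allowed_ptr(p, &mask))
 *		pr_warn("failed to pin task\n");
 */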
Rusty Russelle0ad9552009-09-24 09:34:38 -06001414
Christian Borntraeger6d0d2872016-11-16 13:23:05 +01001415#ifndef cpu_relax_yield
1416#define cpu_relax_yield() cpu_relax()
1417#endif
1418
Ingo Molnar36c8b582006-07-03 00:25:41 -07001419extern unsigned long long
Ingo Molnar41b86e92007-07-09 18:51:58 +02001420task_sched_runtime(struct task_struct *task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
1422/* sched_exec is called by processes performing an exec */
1423#ifdef CONFIG_SMP
1424extern void sched_exec(void);
1425#else
1426#define sched_exec() {}
1427#endif
1428
Dan Carpenterfa933842014-05-23 13:20:42 +03001429extern int yield_to(struct task_struct *p, bool preempt);
Ingo Molnar36c8b582006-07-03 00:25:41 -07001430extern void set_user_nice(struct task_struct *p, long nice);
1431extern int task_prio(const struct task_struct *p);
Dongsheng Yangd0ea0262014-01-27 22:00:45 -05001432/**
1433 * task_nice - return the nice value of a given task.
1434 * @p: the task in question.
1435 *
1436 * Return: The nice value [ -20 ... 0 ... 19 ].
1437 */
1438static inline int task_nice(const struct task_struct *p)
1439{
1440 return PRIO_TO_NICE((p)->static_prio);
1441}
Ingo Molnar36c8b582006-07-03 00:25:41 -07001442extern int can_nice(const struct task_struct *p, const int nice);
1443extern int task_curr(const struct task_struct *p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444extern int idle_cpu(int cpu);
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07001445extern int sched_setscheduler(struct task_struct *, int,
1446 const struct sched_param *);
Rusty Russell961ccdd2008-06-23 13:55:38 +10001447extern int sched_setscheduler_nocheck(struct task_struct *, int,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07001448 const struct sched_param *);
Dario Faggiolid50dde52013-11-07 14:43:36 +01001449extern int sched_setattr(struct task_struct *,
1450 const struct sched_attr *);
Ingo Molnar36c8b582006-07-03 00:25:41 -07001451extern struct task_struct *idle_task(int cpu);
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001452/**
1453 * is_idle_task - is the specified task an idle task?
Randy Dunlapfa757282012-01-21 11:03:13 -08001454 * @p: the task in question.
Yacine Belkadie69f6182013-07-12 20:45:47 +02001455 *
1456 * Return: 1 if @p is an idle task. 0 otherwise.
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001457 */
Paul E. McKenney7061ca32011-12-20 08:20:46 -08001458static inline bool is_idle_task(const struct task_struct *p)
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001459{
Peter Zijlstrac1de45c2016-11-28 23:03:05 -08001460 return !!(p->flags & PF_IDLE);
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001461}
Ingo Molnar36c8b582006-07-03 00:25:41 -07001462extern struct task_struct *curr_task(int cpu);
Peter Zijlstraa458ae22016-09-20 20:29:40 +02001463extern void ia64_set_curr_task(int cpu, struct task_struct *p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464
1465void yield(void);
1466
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467union thread_union {
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001468#ifndef CONFIG_THREAD_INFO_IN_TASK
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 struct thread_info thread_info;
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001470#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 unsigned long stack[THREAD_SIZE/sizeof(long)];
1472};
1473
1474#ifndef __HAVE_ARCH_KSTACK_END
1475static inline int kstack_end(void *addr)
1476{
1477 /* Reliable end of stack detection:
1478 * Some APM BIOS versions misalign the stack.
1479 */
1480 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1481}
1482#endif
1483
1484extern union thread_union init_thread_union;
1485extern struct task_struct init_task;
1486
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001487extern struct pid_namespace init_pid_ns;
1488
1489/*
1490 * find a task by one of its numerical ids
1491 *
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001492 * find_task_by_pid_ns():
1493 * finds a task by its pid in the specified namespace
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001494 * find_task_by_vpid():
1495 * finds a task by its virtual pid
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001496 *
Pavel Emelyanove49859e2008-07-25 01:48:36 -07001497 * see also find_vpid() etc in include/linux/pid.h
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001498 */
1499
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001500extern struct task_struct *find_task_by_vpid(pid_t nr);
1501extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1502 struct pid_namespace *ns);
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001503
Harvey Harrisonb3c97522008-02-13 15:03:15 -08001504extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1505extern int wake_up_process(struct task_struct *tsk);
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02001506extern void wake_up_new_task(struct task_struct *tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507#ifdef CONFIG_SMP
1508 extern void kick_process(struct task_struct *tsk);
1509#else
1510 static inline void kick_process(struct task_struct *tsk) { }
1511#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513extern void exit_files(struct task_struct *);
Oleg Nesterovcbaffba2008-05-26 20:55:42 +04001514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515extern void exit_itimers(struct signal_struct *);
1516
Linus Torvaldsc4ad8f92014-02-05 12:54:53 -08001517extern int do_execve(struct filename *,
David Howellsd7627462010-08-17 23:52:56 +01001518 const char __user * const __user *,
Al Viroda3d4c52012-10-20 21:49:33 -04001519 const char __user * const __user *);
David Drysdale51f39a12014-12-12 16:57:29 -08001520extern int do_execveat(int, struct filename *,
1521 const char __user * const __user *,
1522 const char __user * const __user *,
1523 int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Adrian Hunter82b89772014-05-28 11:45:04 +03001525extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1526static inline void set_task_comm(struct task_struct *tsk, const char *from)
1527{
1528 __set_task_comm(tsk, from, false);
1529}
Andrew Morton59714d62008-02-04 22:27:21 -08001530extern char *get_task_comm(char *to, struct task_struct *tsk);
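/*
 * Reader sketch: get_task_comm() snapshots ->comm under task_lock(), so a
 * caller only needs a TASK_COMM_LEN buffer:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, p);
 */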
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
1532#ifdef CONFIG_SMP
Peter Zijlstra317f3942011-04-05 17:23:58 +02001533void scheduler_ipi(void);
Roland McGrath85ba2d82008-07-25 19:45:58 -07001534extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535#else
Peter Zijlstra184748c2011-04-05 17:23:39 +02001536static inline void scheduler_ipi(void) { }
Roland McGrath85ba2d82008-07-25 19:45:58 -07001537static inline unsigned long wait_task_inactive(struct task_struct *p,
1538 long match_state)
1539{
1540 return 1;
1541}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542#endif
1543
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544/*
Eric W. Biederman260ea102006-06-23 02:05:18 -07001545 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
Jens Axboe22e2c502005-06-27 10:55:12 +02001546 * subscriptions and synchronises with wait4(). Also used in procfs. Also
Paul Menageddbcc7e2007-10-18 23:39:30 -07001547 * pins the final release of task.io_context. Also protects ->cpuset and
Oleg Nesterovd68b46f2012-03-05 14:59:13 -08001548 * ->cgroup.subsys[]. And ->vfork_done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 *
1550 * Nests both inside and outside of read_lock(&tasklist_lock).
1551 * It must not be nested with write_lock_irq(&tasklist_lock),
1552 * neither inside nor outside.
1553 */
1554static inline void task_lock(struct task_struct *p)
1555{
1556 spin_lock(&p->alloc_lock);
1557}
1558
1559static inline void task_unlock(struct task_struct *p)
1560{
1561 spin_unlock(&p->alloc_lock);
1562}
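/*
 * Pairing sketch, modelled on get_task_mm(): take task_lock() around reads
 * of the fields listed above, e.g. to grab a stable reference to ->mm:
 *
 *	task_lock(p);
 *	mm = p->mm;
 *	if (mm)
 *		atomic_inc(&mm->mm_users);
 *	task_unlock(p);
 */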
1563
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001564#ifdef CONFIG_THREAD_INFO_IN_TASK
1565
1566static inline struct thread_info *task_thread_info(struct task_struct *task)
1567{
1568 return &task->thread_info;
1569}
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001570
1571/*
1572 * When accessing the stack of a non-current task that might exit, use
1573 * try_get_task_stack() instead. task_stack_page will return a pointer
1574 * that could get freed out from under you.
1575 */
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001576static inline void *task_stack_page(const struct task_struct *task)
1577{
1578 return task->stack;
1579}
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001580
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001581#define setup_thread_stack(new,old) do { } while(0)
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001582
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001583static inline unsigned long *end_of_stack(const struct task_struct *task)
1584{
1585 return task->stack;
1586}
1587
1588#elif !defined(__HAVE_THREAD_FUNCTIONS)
Al Virof0373602005-11-13 16:06:57 -08001589
Roman Zippelf7e42172007-05-09 02:35:17 -07001590#define task_thread_info(task) ((struct thread_info *)(task)->stack)
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001591#define task_stack_page(task) ((void *)(task)->stack)
Al Viroa1261f52005-11-13 16:06:55 -08001592
Al Viro10ebffd2005-11-13 16:06:56 -08001593static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1594{
1595 *task_thread_info(p) = *task_thread_info(org);
1596 task_thread_info(p)->task = p;
1597}
1598
Chuck Ebbert6a402812014-09-20 10:17:51 -05001599/*
1600 * Return the address of the last usable long on the stack.
1601 *
1602 * When the stack grows down, this is just above the thread
1603 * info struct. Going any lower will corrupt the thread_info.
1604 *
1605 * When the stack grows up, this is the highest address.
1606 * Beyond that position, we corrupt data on the next page.
1607 */
Al Viro10ebffd2005-11-13 16:06:56 -08001608static inline unsigned long *end_of_stack(struct task_struct *p)
1609{
Chuck Ebbert6a402812014-09-20 10:17:51 -05001610#ifdef CONFIG_STACK_GROWSUP
1611 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
1612#else
Roman Zippelf7e42172007-05-09 02:35:17 -07001613 return (unsigned long *)(task_thread_info(p) + 1);
Chuck Ebbert6a402812014-09-20 10:17:51 -05001614#endif
Al Viro10ebffd2005-11-13 16:06:56 -08001615}
1616
Al Virof0373602005-11-13 16:06:57 -08001617#endif
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001618
Andy Lutomirski68f24b082016-09-15 22:45:48 -07001619#ifdef CONFIG_THREAD_INFO_IN_TASK
1620static inline void *try_get_task_stack(struct task_struct *tsk)
1621{
1622 return atomic_inc_not_zero(&tsk->stack_refcount) ?
1623 task_stack_page(tsk) : NULL;
1624}
1625
1626extern void put_task_stack(struct task_struct *tsk);
1627#else
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001628static inline void *try_get_task_stack(struct task_struct *tsk)
1629{
1630 return task_stack_page(tsk);
1631}
1632
1633static inline void put_task_stack(struct task_struct *tsk) {}
Andy Lutomirski68f24b082016-09-15 22:45:48 -07001634#endif
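/*
 * Usage sketch: when inspecting the stack of a task that may exit
 * concurrently, pin it first and drop the reference when done:
 *
 *	void *stack = try_get_task_stack(tsk);
 *
 *	if (stack) {
 *		...
 *		put_task_stack(tsk);
 *	}
 */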
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001635
Aaron Tomlina70857e2014-09-12 14:16:18 +01001636#define task_stack_end_corrupted(task) \
1637 (*(end_of_stack(task)) != STACK_END_MAGIC)
Al Virof0373602005-11-13 16:06:57 -08001638
FUJITA Tomonori8b05c7e2008-07-23 21:26:53 -07001639static inline int object_is_on_stack(void *obj)
1640{
1641 void *stack = task_stack_page(current);
1642
1643 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
1644}
1645
Linus Torvaldsb235bee2016-06-24 15:09:37 -07001646extern void thread_stack_cache_init(void);
Benjamin Herrenschmidt8c9843e2008-04-18 16:56:15 +10001647
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001648#ifdef CONFIG_DEBUG_STACK_USAGE
1649static inline unsigned long stack_not_used(struct task_struct *p)
1650{
1651 unsigned long *n = end_of_stack(p);
1652
1653 do { /* Skip over canary */
Helge Deller6c31da32016-03-19 17:54:10 +01001654# ifdef CONFIG_STACK_GROWSUP
1655 n--;
1656# else
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001657 n++;
Helge Deller6c31da32016-03-19 17:54:10 +01001658# endif
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001659 } while (!*n);
1660
Helge Deller6c31da32016-03-19 17:54:10 +01001661# ifdef CONFIG_STACK_GROWSUP
1662 return (unsigned long)end_of_stack(p) - (unsigned long)n;
1663# else
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001664 return (unsigned long)n - (unsigned long)end_of_stack(p);
Helge Deller6c31da32016-03-19 17:54:10 +01001665# endif
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001666}
1667#endif
Aaron Tomlind4311ff2014-09-12 14:16:17 +01001668extern void set_task_stack_end_magic(struct task_struct *tsk);
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001669
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670/* set thread flags in other task's structures
1671 * - see asm/thread_info.h for TIF_xxxx flags available
1672 */
1673static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1674{
Al Viroa1261f52005-11-13 16:06:55 -08001675 set_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676}
1677
1678static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1679{
Al Viroa1261f52005-11-13 16:06:55 -08001680 clear_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681}
1682
1683static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1684{
Al Viroa1261f52005-11-13 16:06:55 -08001685 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686}
1687
1688static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1689{
Al Viroa1261f52005-11-13 16:06:55 -08001690 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691}
1692
1693static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1694{
Al Viroa1261f52005-11-13 16:06:55 -08001695 return test_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696}
1697
1698static inline void set_tsk_need_resched(struct task_struct *tsk)
1699{
1700 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1701}
1702
1703static inline void clear_tsk_need_resched(struct task_struct *tsk)
1704{
1705 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1706}
1707
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001708static inline int test_tsk_need_resched(struct task_struct *tsk)
1709{
1710 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1711}
1712
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713/*
1714 * cond_resched() and cond_resched_lock(): latency reduction via
1715 * explicit rescheduling in places that are safe. The return
1716 * value indicates whether a reschedule was actually done.
1717 * cond_resched_lock() will drop the spinlock before scheduling,
1718 * cond_resched_softirq() will enable bhs before scheduling.
1719 */
Peter Zijlstra35a773a2016-09-19 12:57:53 +02001720#ifndef CONFIG_PREEMPT
Linus Torvaldsc3921ab2008-05-11 16:04:48 -07001721extern int _cond_resched(void);
Peter Zijlstra35a773a2016-09-19 12:57:53 +02001722#else
1723static inline int _cond_resched(void) { return 0; }
1724#endif
Frederic Weisbecker6f80bd92009-07-16 15:44:29 +02001725
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001726#define cond_resched() ({ \
Peter Zijlstra34274452014-09-24 10:18:56 +02001727 ___might_sleep(__FILE__, __LINE__, 0); \
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001728 _cond_resched(); \
1729})
Frederic Weisbecker6f80bd92009-07-16 15:44:29 +02001730
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001731extern int __cond_resched_lock(spinlock_t *lock);
1732
1733#define cond_resched_lock(lock) ({ \
Peter Zijlstra34274452014-09-24 10:18:56 +02001734 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001735 __cond_resched_lock(lock); \
1736})
1737
1738extern int __cond_resched_softirq(void);
1739
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07001740#define cond_resched_softirq() ({ \
Peter Zijlstra34274452014-09-24 10:18:56 +02001741 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07001742 __cond_resched_softirq(); \
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001743})
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Simon Hormanf6f3c432013-05-22 14:50:31 +09001745static inline void cond_resched_rcu(void)
1746{
1747#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1748 rcu_read_unlock();
1749 cond_resched();
1750 rcu_read_lock();
1751#endif
1752}
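/*
 * Typical use (illustrative; "item"/"process_item" are placeholders): break
 * up long loops in process context so !CONFIG_PREEMPT kernels stay
 * responsive:
 *
 *	list_for_each_entry(item, &items, list) {
 *		process_item(item);
 *		cond_resched();
 *	}
 */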
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754/*
1755 * Does a critical section need to be broken due to another
Nick Piggin95c354f2008-01-30 13:31:20 +01001756 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
1757 * but expresses a general need for low latency.)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 */
Nick Piggin95c354f2008-01-30 13:31:20 +01001759static inline int spin_needbreak(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760{
Nick Piggin95c354f2008-01-30 13:31:20 +01001761#ifdef CONFIG_PREEMPT
1762 return spin_is_contended(lock);
1763#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 return 0;
Nick Piggin95c354f2008-01-30 13:31:20 +01001765#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766}
1767
Peter Zijlstra75f93fe2013-09-27 17:30:03 +02001768static __always_inline bool need_resched(void)
1769{
1770 return unlikely(tif_need_resched());
1771}
1772
Thomas Gleixneree761f62013-03-21 22:49:32 +01001773/*
Frank Mayharf06febc2008-09-12 09:54:39 -07001774 * Thread group CPU time accounting.
1775 */
Peter Zijlstra4cd4c1b2009-02-05 12:24:16 +01001776void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
Peter Zijlstra4da94d492009-02-11 11:30:27 +01001777void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
Frank Mayharf06febc2008-09-12 09:54:39 -07001778
Frank Mayharf06febc2008-09-12 09:54:39 -07001779/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 * Wrappers for p->thread_info->cpu access. No-op on UP.
1781 */
1782#ifdef CONFIG_SMP
1783
1784static inline unsigned int task_cpu(const struct task_struct *p)
1785{
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001786#ifdef CONFIG_THREAD_INFO_IN_TASK
1787 return p->cpu;
1788#else
Al Viroa1261f52005-11-13 16:06:55 -08001789 return task_thread_info(p)->cpu;
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001790#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791}
1792
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001793static inline int task_node(const struct task_struct *p)
1794{
1795 return cpu_to_node(task_cpu(p));
1796}
1797
Ingo Molnarc65cc872007-07-09 18:51:58 +02001798extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
1800#else
1801
1802static inline unsigned int task_cpu(const struct task_struct *p)
1803{
1804 return 0;
1805}
1806
1807static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1808{
1809}
1810
1811#endif /* CONFIG_SMP */
1812
Pan Xinhuid9345c62016-11-02 05:08:28 -04001813/*
1814 * In order to reduce various lock holder preemption latencies provide an
1815 * interface to see if a vCPU is currently running or not.
1816 *
1817 * This allows us to terminate optimistic spin loops and block, analogous to
1818 * the native optimistic spin heuristic of testing if the lock owner task is
1819 * running or not.
1820 */
1821#ifndef vcpu_is_preempted
1822# define vcpu_is_preempted(cpu) false
1823#endif
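/*
 * Intended-use sketch (trylock() is a stand-in for the lock's real acquire
 * primitive): stop optimistically spinning once the holder's vCPU has been
 * preempted and block instead:
 *
 *	while (!trylock(lock)) {
 *		if (vcpu_is_preempted(task_cpu(owner)))
 *			break;
 *		cpu_relax();
 *	}
 */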
1824
Rusty Russell96f874e2008-11-25 02:35:14 +10301825extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1826extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07001827
Dhaval Giani7c941432010-01-20 13:26:18 +01001828#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08001829extern struct task_group root_task_group;
Peter Zijlstra8323f262012-06-22 13:36:05 +02001830#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02001831
Dhaval Giani54e99122009-02-27 15:13:54 +05301832extern int task_can_switch_user(struct user_struct *up,
1833 struct task_struct *tsk);
1834
Dave Hansen82455252008-02-04 22:28:59 -08001835#ifndef TASK_SIZE_OF
1836#define TASK_SIZE_OF(tsk) TASK_SIZE
1837#endif
1838
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839#endif