#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>
#include <linux/nodemask.h>

#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types_task.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>

#include <asm/current.h>

/* task_struct member predeclarations: */
struct audit_context;
struct autogroup;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct filename;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct task_struct;
struct uts_namespace;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_NEW		2048
#define TASK_STATE_MAX		4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
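
/*
 * For reference, each character in TASK_STATE_TO_CHAR_STR corresponds to
 * one of the state bits above, lowest bit first:
 * R = TASK_RUNNING, S = TASK_INTERRUPTIBLE, D = TASK_UNINTERRUPTIBLE,
 * T = __TASK_STOPPED, t = __TASK_TRACED, X = EXIT_DEAD, Z = EXIT_ZOMBIE,
 * x = TASK_DEAD, K = TASK_WAKEKILL, W = TASK_WAKING, P = TASK_PARKED,
 * N = TASK_NOLOAD, n = TASK_NEW.
 */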

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

/* Task command name length */
#define TASK_COMM_LEN 16
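
/*
 * Illustrative sketch: ->comm should not be read directly from another
 * task; copy it out under task_lock(), e.g. via get_task_comm() as the
 * comment on task_struct::comm below suggests:
 *
 *	char comm[TASK_COMM_LEN];
 *	get_task_comm(comm, tsk);
 */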
170
Rik van Riel3fa08182015-03-09 12:12:07 -0400171extern cpumask_var_t cpu_isolated_map;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173extern void scheduler_tick(void);
174
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175#define MAX_SCHEDULE_TIMEOUT LONG_MAX
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800176extern signed long schedule_timeout(signed long timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -0700177extern signed long schedule_timeout_interruptible(signed long timeout);
Matthew Wilcox294d5cc2007-12-06 11:59:46 -0500178extern signed long schedule_timeout_killable(signed long timeout);
Nishanth Aravamudan64ed93a2005-09-10 00:27:21 -0700179extern signed long schedule_timeout_uninterruptible(signed long timeout);
Andrew Morton69b27ba2016-03-25 14:20:21 -0700180extern signed long schedule_timeout_idle(signed long timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181asmlinkage void schedule(void);
Thomas Gleixnerc5491ea2011-03-21 12:09:35 +0100182extern void schedule_preempt_disabled(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
Tejun Heo10ab5642016-10-28 12:58:10 -0400184extern int __must_check io_schedule_prepare(void);
185extern void io_schedule_finish(int token);
NeilBrown9cff8ad2015-02-13 15:49:17 +1100186extern long io_schedule_timeout(long timeout);
Tejun Heo10ab5642016-10-28 12:58:10 -0400187extern void io_schedule(void);
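
/*
 * Illustrative sketch: the schedule_timeout_*() variants set the task
 * state themselves; plain schedule_timeout() expects the caller to have
 * done so, e.g. to sleep for up to 100ms unless woken earlier:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * The return value is the number of jiffies left if we were woken up
 * early, 0 if the timeout expired.
 */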

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};
205
206/**
Frank Mayharf06febc2008-09-12 09:54:39 -0700207 * struct task_cputime - collected CPU time counts
Frederic Weisbecker5613fda2017-01-31 04:09:23 +0100208 * @utime: time spent in user mode, in nanoseconds
209 * @stime: time spent in kernel mode, in nanoseconds
Frank Mayharf06febc2008-09-12 09:54:39 -0700210 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
Ingo Molnar5ce73a42008-09-14 17:11:46 +0200211 *
Peter Zijlstra9d7fb042015-06-30 11:30:54 +0200212 * This structure groups together three kinds of CPU time that are tracked for
213 * threads and thread groups. Most things considering CPU time want to group
214 * these counts together and treat all three of them in parallel.
Frank Mayharf06febc2008-09-12 09:54:39 -0700215 */
216struct task_cputime {
Frederic Weisbecker5613fda2017-01-31 04:09:23 +0100217 u64 utime;
218 u64 stime;
Frank Mayharf06febc2008-09-12 09:54:39 -0700219 unsigned long long sum_exec_runtime;
220};
Peter Zijlstra9d7fb042015-06-30 11:30:54 +0200221
Frank Mayharf06febc2008-09-12 09:54:39 -0700222/* Alternate field names when used to cache expirations. */
Frank Mayharf06febc2008-09-12 09:54:39 -0700223#define virt_exp utime
Peter Zijlstra9d7fb042015-06-30 11:30:54 +0200224#define prof_exp stime
Frank Mayharf06febc2008-09-12 09:54:39 -0700225#define sched_exp sum_exec_runtime
226
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227struct sched_info {
Ingo Molnar7f5f8e82017-02-06 11:44:12 +0100228#ifdef CONFIG_SCHED_INFO
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229 /* cumulative counters */
Ingo Molnar2d723762007-10-15 17:00:12 +0200230 unsigned long pcount; /* # of times run on this cpu */
Ken Chen9c2c4802008-12-16 23:41:22 -0800231 unsigned long long run_delay; /* time spent waiting on a runqueue */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
233 /* timestamps */
Balbir Singh172ba842007-07-09 18:52:00 +0200234 unsigned long long last_arrival,/* when we last ran on a cpu */
235 last_queued; /* when we were last queued to run */
Naveen N. Raof6db8342015-06-25 23:53:37 +0530236#endif /* CONFIG_SCHED_INFO */
Ingo Molnar7f5f8e82017-02-06 11:44:12 +0100237};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239/*
Yuyang Du6ecdd742016-04-05 12:12:26 +0800240 * Integer metrics need fixed point arithmetic, e.g., sched/fair
241 * has a few: load, load_avg, util_avg, freq, and capacity.
242 *
243 * We define a basic fixed point arithmetic range, and then formalize
244 * all these metrics based on that basic range.
245 */
246# define SCHED_FIXEDPOINT_SHIFT 10
247# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
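
/*
 * Worked example, derived from the definitions above: with a shift of
 * 10 the fixed-point "1.0" is SCHED_FIXEDPOINT_SCALE == 1024, so a 50%
 * ratio is stored as 512 and converted back with a shift:
 *
 *	percent = (512 * 100) >> SCHED_FIXEDPOINT_SHIFT;	/* == 50 */
 */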

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulate an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};
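
/*
 * Worked example for the definitions above (numbers made up): a task
 * with scale_load_down(load) == 1024 that is runnable half the time
 * settles at load_avg ~ 0.5 * 1024 = 512; if it actually runs 25% of
 * the time, util_avg settles at ~ 0.25 * SCHED_CAPACITY_SCALE.
 */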

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* maximum runtime for each instance	*/
	u64 dl_deadline;	/* relative deadline of each instance	*/
	u64 dl_period;		/* separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* remaining runtime for this instance	*/
	u64 deadline;		/* absolute deadline for this instance	*/
	unsigned int flags;	/* specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to PI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the cpu before consuming
	 * all its available runtime during the last job.
	 */
	int dl_throttled, dl_boosted, dl_yielded;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;
};
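
/*
 * Illustrative sketch (values made up): the parameters above follow the
 * usual deadline-scheduling constraint
 *
 *	dl_runtime <= dl_deadline <= dl_period
 *
 * e.g. a task needing 10ms of CPU every 100ms, due within 30ms of each
 * period start, would have (in nanoseconds):
 *
 *	dl_runtime  =  10 * 1000 * 1000;
 *	dl_deadline =  30 * 1000 * 1000;
 *	dl_period   = 100 * 1000 * 1000;
 */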

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;	/* Otherwise the compiler can store garbage here. */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;	/* current CPU */
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info sched_info;

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned long jobctl;	/* JOBCTL_*, siglock protected */

	/* Used for emulating ABI behavior of previous Linux versions */
	unsigned int personality;

	/* scheduler bits, serialized by scheduler locks */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
	unsigned :0; /* force alignment to the next boundary */

	/* unserialized, strictly 'current' */
	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
	unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags; /* Flags needing atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct __rcu *real_parent; /* real parent process */
	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled, stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		/* Task is sleeping or running in a CPU with VTIME inactive */
		VTIME_INACTIVE = 0,
		/* Task runs in userspace in a CPU with VTIME active */
		VTIME_USER,
		/* Task runs in kernelspace in a CPU with VTIME active */
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	u64 start_time;		/* monotonic time in nsec */
	u64 real_start_time;	/* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

/* process credentials */
	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned sas_ss_flags;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use. */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	u64 acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
	int closid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;			/* migration stamp */
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

	struct tlbflush_unmap_batch tlb_ubc;

	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	/*
	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for some dirty throttling pause
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when; /* start of a write-and-pause period */

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack *ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask and counter of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled). */
	enum kcov_mode kcov_mode;
	/* Size of the kcov_area. */
	unsigned kcov_size;
	/* Buffer for coverage collection. */
	void *kcov_area;
	/* kcov descriptor wired with this task or NULL. */
926 struct kcov *kcov;
927#endif
Vladimir Davydov6f185c22014-12-12 16:55:15 -0800928#ifdef CONFIG_MEMCG
Tejun Heo626ebc42015-11-05 18:46:09 -0800929 struct mem_cgroup *memcg_in_oom;
930 gfp_t memcg_oom_gfp_mask;
931 int memcg_oom_order;
Tejun Heob23afb92015-11-05 18:46:11 -0800932
933 /* number of pages to reclaim on returning to userland */
934 unsigned int memcg_nr_pages_over_high;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -0800935#endif
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +0530936#ifdef CONFIG_UPROBES
937 struct uprobe_task *utask;
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +0530938#endif
Kent Overstreetcafe5632013-03-23 16:11:31 -0700939#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
940 unsigned int sequential_io;
941 unsigned int sequential_io_avg;
942#endif
Peter Zijlstra8eb23b92014-09-24 10:18:55 +0200943#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
944 unsigned long task_state_change;
945#endif
David Hildenbrand8bcbde52015-05-11 17:52:06 +0200946 int pagefault_disabled;
Michal Hocko03049262016-03-25 14:20:33 -0700947#ifdef CONFIG_MMU
Vladimir Davydov29c696e2016-03-25 14:20:39 -0700948 struct task_struct *oom_reaper_list;
Michal Hocko03049262016-03-25 14:20:33 -0700949#endif
Andy Lutomirskiba14a192016-08-11 02:35:21 -0700950#ifdef CONFIG_VMAP_STACK
951 struct vm_struct *stack_vm_area;
952#endif
Andy Lutomirski68f24b082016-09-15 22:45:48 -0700953#ifdef CONFIG_THREAD_INFO_IN_TASK
954 /* A live task holds one reference. */
955 atomic_t stack_refcount;
956#endif
Dave Hansen0c8c0f02015-07-17 12:28:11 +0200957/* CPU-specific state of this task */
958 struct thread_struct thread;
959/*
960 * WARNING: on x86, 'thread_struct' contains a variable-sized
961 * structure. It *MUST* be at the end of 'task_struct'.
962 *
963 * Do not put anything below here!
964 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965};
966
Alexey Dobriyane8681712007-10-26 12:17:22 +0400967static inline struct pid *task_pid(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -0700968{
969 return task->pids[PIDTYPE_PID].pid;
970}
971
Alexey Dobriyane8681712007-10-26 12:17:22 +0400972static inline struct pid *task_tgid(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -0700973{
974 return task->group_leader->pids[PIDTYPE_PID].pid;
975}
976
Oleg Nesterov6dda81f2009-04-02 16:58:35 -0700977/*
978 * Without tasklist or rcu lock it is not safe to dereference
979 * the result of task_pgrp/task_session even if task == current,
980 * we can race with another thread doing sys_setsid/sys_setpgid.
981 */
Alexey Dobriyane8681712007-10-26 12:17:22 +0400982static inline struct pid *task_pgrp(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -0700983{
984 return task->group_leader->pids[PIDTYPE_PGID].pid;
985}
986
Alexey Dobriyane8681712007-10-26 12:17:22 +0400987static inline struct pid *task_session(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -0700988{
989 return task->group_leader->pids[PIDTYPE_SID].pid;
990}
991
Pavel Emelyanov7af57292007-10-18 23:40:06 -0700992/*
993 * the helpers to get the task's different pids as they are seen
994 * from various namespaces
995 *
996 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
 *                  current.
 * task_xid_nr_ns() : id seen from the ns specified;
 *
 * set_task_vxid() : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

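/*
 * Illustrative sketch, not part of the kernel API: the per-namespace
 * analogue of the is_global_init() check further below. A task whose
 * tgid, as seen from its own pid namespace, is 1 is that namespace's
 * init. The example_ prefix marks the helper as hypothetical.
 */
static inline bool example_task_is_ns_init(struct task_struct *tsk)
{
	return task_tgid_vnr(tsk) == 1;
}
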
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive() fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads, we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}
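
/*
 * Illustrative sketch (hypothetical helper, not a kernel API): callers
 * such as victim-selection logic use is_global_init() to leave the
 * first userspace task alone.
 */
static inline bool example_may_disturb(struct task_struct *tsk)
{
	/* Killing or reaping global init would bring the system down. */
	return !is_global_init(tsk);
}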

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE		0x00000002	/* I am an IDLE thread */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000	/* this thread called freeze_processes and should not be frozen */
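
/*
 * Illustrative sketch (hypothetical helper): the PF_* values above are
 * plain bits in tsk->flags, so a read-only test from another task
 * (permitted per the comment below) is a simple mask.
 */
static inline bool example_is_kernel_thread(const struct task_struct *tsk)
{
	return (tsk->flags & PF_KTHREAD) != 0;
}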

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in read-only mode, for example with
 * tsk_used_math() (as during threaded core dumping).
 * There is, however, an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)
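
/*
 * Illustrative sketch (hypothetical helper): per the rule above, a
 * ptracer or forking parent may write a stopped child's flags, so it
 * can propagate the FPU-used bit like this.
 */
static inline void example_inherit_fpu_flag(struct task_struct *child)
{
	copy_to_stopped_child_used_math(child);
	/* tsk_used_math(child) now reads 0 or PF_USED_MATH, never 1. */
}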

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE  1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB  2	/* Spread some slab caches over cpuset */
#define PFA_LMK_WAITING  3	/* Lowmemorykiller is waiting */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
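
/*
 * Illustrative sketch (hypothetical caller): the expansions above
 * generate task_no_new_privs(), task_set_no_new_privs() and friends.
 * Note that NO_NEW_PRIVS gets no CLEAR accessor, so the bit is one-way
 * for the life of the task.
 */
static inline void example_seal_privileges(struct task_struct *p)
{
	if (!task_no_new_privs(p))
		task_set_no_new_privs(p);
}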

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}
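
/*
 * Illustrative sketch (hypothetical function): the save/set/restore
 * idiom tsk_restore_flags() exists for, here with PF_MEMALLOC_NOIO.
 * Only the masked bit is restored; flag changes made by current itself
 * in between are preserved.
 */
static inline void example_noio_section(void)
{
	unsigned long orig_flags = current->flags;

	current->flags |= PF_MEMALLOC_NOIO;	/* no I/O from reclaim here */
	/* ... perform allocations that must not recurse into I/O ... */
	tsk_restore_flags(current, orig_flags, PF_MEMALLOC_NOIO);
}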

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
				const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				       const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
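
/*
 * Illustrative sketch (hypothetical helper): restricting a task to a
 * single CPU. On SMP this migrates the task if necessary; the UP stub
 * above only verifies that CPU 0 remains allowed.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}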

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif
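
/*
 * Illustrative sketch (hypothetical spin loop): cpu_relax_yield() is
 * meant for busy-waits on another CPU's progress. On most architectures
 * it is plain cpu_relax(); s390, for instance, can yield the hypervisor
 * CPU instead.
 */
static inline void example_spin_until(volatile int *flag)
{
	while (!*flag)
		cpu_relax_yield();
}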

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
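
/*
 * Illustrative sketch (hypothetical helper): reading and adjusting a
 * task's nice value with the accessors above. A real caller would also
 * consult can_nice(), declared just below, before raising priority.
 */
static inline void example_denice(struct task_struct *p)
{
	int nice = task_nice(p);

	if (nice < 19)			/* 19 is the weakest nice level */
		set_user_nice(p, nice + 1);
}
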
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);
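
/*
 * Illustrative sketch (hypothetical helper): the canonical lookup
 * pattern. The tasklist is RCU-protected, so the returned pointer is
 * only stable under rcu_read_lock(); do any work, or take a reference,
 * before unlocking.
 */
static inline pid_t example_vpid_to_global_pid(pid_t vnr)
{
	struct task_struct *p;
	pid_t nr = 0;

	rcu_read_lock();
	p = find_task_by_vpid(vnr);
	if (p)
		nr = task_pid_nr(p);
	rcu_read_unlock();

	return nr;
}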

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
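
/*
 * Illustrative sketch (hypothetical helper): renaming a kernel thread.
 * set_task_comm() truncates to TASK_COMM_LEN; the name format here is
 * an arbitrary example.
 */
static inline void example_name_worker(struct task_struct *tsk, int id)
{
	char comm[TASK_COMM_LEN];

	snprintf(comm, sizeof(comm), "example/%d", id);
	set_task_comm(tsk, comm);
}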

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was in fact done.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
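
/*
 * Illustrative sketch (hypothetical loop): the standard way a long
 * kernel-side loop stays scheduler-friendly. process_one() stands in
 * for a unit of real work.
 */
static inline void example_process_items(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* process_one(i); */
		cond_resched();		/* reschedule if needed; may sleep */
	}
}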

/*
 * Does a critical section need to be broken due to another
 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
 * but reflects a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
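
/*
 * Illustrative sketch (hypothetical scan): the classic lock-break
 * pattern. cond_resched_lock() above combines the contention/resched
 * check with dropping and re-taking the lock.
 */
static inline void example_scan_under_lock(spinlock_t *lock, int nr)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < nr; i++) {
		/* ... one step of work under the lock ... */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}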

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return p->cpu;
#else
	return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies, provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif
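
/*
 * Illustrative sketch (hypothetical spin heuristic): how a lock's
 * optimistic-spin path can use vcpu_is_preempted(). owner_cpu stands in
 * for wherever the lock records its holder's CPU.
 */
static inline bool example_worth_spinning(int owner_cpu)
{
	/* Stop spinning once the holder's vCPU is not actually running. */
	return !vcpu_is_preempted(owner_cpu);
}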

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#endif