#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)	((void)(x))
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
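/*
 * Worked example of the invariant above (assuming SCHED_FIXEDPOINT_SHIFT is
 * 10, its usual value): the nice-0 weight visible to user space is 1024, so
 * on 64bit NICE_0_LOAD == 1 << 20 == 1048576 and
 * scale_load(1024) == 1024 << 10 == 1048576, while scale_load_down() undoes
 * the shift. On 32bit both helpers are the identity and NICE_0_LOAD == 1024.
 */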

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)
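/*
 * Worked example for DL_SCALE: a value of 10 means deadline math is done
 * with a granularity of 1 << 10 == 1024ns, i.e. "just above 1us"; a value
 * of 9 would give 512ns, "just above 0.5us".
 */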

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
	unsigned int rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control file.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
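/*
 * Sketch of how the admission test above behaves (assuming the usual
 * to_ratio()-style fixed point, where 1.0 == 1 << 20): on a root domain
 * with 4 CPUs and dl_b->bw corresponding to 95%, a task asking for 30ms
 * of runtime every 100ms contributes new_bw of roughly 0.30 in that fixed
 * point. __dl_overflow() reports failure once total_bw - old_bw + new_bw
 * would exceed 0.95 * 4, i.e. once the summed per-task bandwidths no
 * longer fit in the domain's capacity.
 */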

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
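/*
 * Sketch of a typical walk_tg_tree() use (the visitor below is only an
 * illustration, not something defined elsewhere in the scheduler): @down
 * runs top-down on every group, @up on the way back up; tg_nop() is the
 * stock "do nothing" callback for the direction a caller does not care
 * about. A non-zero return from either visitor aborts the walk and is
 * propagated back to the caller.
 *
 *	static int tg_visit(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;	// count groups in the hierarchy
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_visit, tg_nop, &nr);
 *	rcu_read_unlock();
 */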

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (the lowest schedulable
	 * entities in a hierarchy). Non-leaf cfs_rqs hold other, higher
	 * schedulable entities (like users, containers etc.).
	 *
	 * leaf_cfs_rq_list ties together the leaf cfs_rq's of a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

struct max_cpu_capacity {
	raw_spinlock_t lock;
	unsigned long val;
	int cpu;
};

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/* Indicate one or more cpus over-utilized (tipping point) */
	bool overutilized;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	/* Maximum cpu capacity in the system. */
	struct max_cpu_capacity max_cpu_capacity;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
	int idle_state_idx;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
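/*
 * Illustrative use, sketched after the way the RT class defers its push
 * operations (the names below are examples, not declarations from this file):
 *
 *	static DEFINE_PER_CPU(struct callback_head, rt_push_head);
 *
 *	// called with rq->lock held
 *	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
 *			       push_rt_tasks);
 *
 * The queued function is invoked from the balance callback path once
 * rq->lock has been released, so it may safely acquire other runqueues'
 * locks.
 */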

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
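/*
 * Example of how the two flag helpers are typically used when the per-CPU
 * pointers below are set up (a sketch of the usual core.c usage, shown only
 * for illustration):
 *
 *	sd_llc  = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	sd_numa = lowest_flag_domain(cpu, SD_NUMA);
 *
 * i.e. the last-level-cache domain is the largest domain that still shares
 * package resources, while sd_numa is the smallest domain spanning more
 * than one NUMA node.
 */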

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
DECLARE_PER_CPU(struct sched_domain *, sd_ea);
DECLARE_PER_CPU(struct sched_domain *, sd_scs);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;
	const struct sched_group_energy *sge;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
	return static_key_##enabled(key); \
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
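/*
 * With the default sysctl values (sched_rt_period_us == 1000000,
 * sched_rt_runtime_us == 950000) the two helpers above return 1s and 0.95s
 * in nanoseconds, i.e. RT throttling allows realtime tasks to consume at
 * most 95% of every 1s period.
 */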

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04

#define ENQUEUE_HEAD		0x08
#define ENQUEUE_REPLENISH	0x10
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x20
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct pin_cookie cookie);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP  0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}

static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
{
	rq->idle_state_idx = idle_state_idx;
}

static inline int idle_get_state_idx(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state_idx;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}

static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
{
}

static inline int idle_get_state_idx(struct rq *rq)
{
	return -1;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

/*
 * Tick may be needed by tasks in the runqueue depending on their policy and
 * requirements. If tick is needed, let's send the target an IPI to kick it
 * out of nohz mode if necessary.
 */
1403static inline void sched_update_tick_dependency(struct rq *rq)
1404{
1405 int cpu;
1406
1407 if (!tick_nohz_full_enabled())
1408 return;
1409
1410 cpu = cpu_of(rq);
1411
1412 if (!tick_nohz_full_cpu(cpu))
1413 return;
1414
1415 if (sched_can_stop_tick(rq))
1416 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1417 else
1418 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1419}
1420#else
1421static inline void sched_update_tick_dependency(struct rq *rq) { }
1422#endif
1423
Dietmar Eggemannbbb138b2015-09-26 18:19:54 +01001424extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
1425
Kirill Tkhai72465442014-05-09 03:00:14 +04001426static inline void add_nr_running(struct rq *rq, unsigned count)
Peter Zijlstra029632f2011-10-25 10:00:11 +02001427{
Kirill Tkhai72465442014-05-09 03:00:14 +04001428 unsigned prev_nr = rq->nr_running;
1429
1430 rq->nr_running = prev_nr + count;
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001431
Kirill Tkhai72465442014-05-09 03:00:14 +04001432 if (prev_nr < 2 && rq->nr_running >= 2) {
Tim Chen4486edd2014-06-23 12:16:49 -07001433#ifdef CONFIG_SMP
1434 if (!rq->rd->overload)
1435 rq->rd->overload = true;
1436#endif
Tim Chen4486edd2014-06-23 12:16:49 -07001437 }
Frederic Weisbecker76d92ac2015-07-17 22:25:49 +02001438
1439 sched_update_tick_dependency(rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001440}
1441
Kirill Tkhai72465442014-05-09 03:00:14 +04001442static inline void sub_nr_running(struct rq *rq, unsigned count)
Peter Zijlstra029632f2011-10-25 10:00:11 +02001443{
Kirill Tkhai72465442014-05-09 03:00:14 +04001444 rq->nr_running -= count;
Frederic Weisbecker76d92ac2015-07-17 22:25:49 +02001445	/* Check if we still need the tick for preemption */
1446 sched_update_tick_dependency(rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001447}
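/*
 * Illustrative caller pattern (a minimal sketch, not the actual sched-class
 * code): enqueue paths bump nr_running, which may mark the root domain as
 * overloaded and re-arm the tick on a nohz_full CPU; dequeue paths undo it.
 */
#if 0
static void example_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	/* ... link p into the class-specific runqueue ... */
	add_nr_running(rq, 1);
}

static void example_dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	/* ... unlink p from the class-specific runqueue ... */
	sub_nr_running(rq, 1);
}
#endif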
1448
Frederic Weisbecker265f22a2013-05-03 03:39:05 +02001449static inline void rq_last_tick_reset(struct rq *rq)
1450{
1451#ifdef CONFIG_NO_HZ_FULL
1452 rq->last_sched_tick = jiffies;
1453#endif
1454}
1455
Peter Zijlstra029632f2011-10-25 10:00:11 +02001456extern void update_rq_clock(struct rq *rq);
1457
1458extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1459extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1460
1461extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1462
1463extern const_debug unsigned int sysctl_sched_time_avg;
1464extern const_debug unsigned int sysctl_sched_nr_migrate;
1465extern const_debug unsigned int sysctl_sched_migration_cost;
1466
1467static inline u64 sched_avg_period(void)
1468{
1469 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1470}
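/*
 * Worked example: assuming the default sysctl_sched_time_avg of 1000 (ms),
 * sched_avg_period() = 1000 * NSEC_PER_MSEC / 2 = 500000000 ns, i.e.
 * sched_avg_update() halves rq->rt_avg every half second of rq clock time.
 */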
1471
Peter Zijlstra029632f2011-10-25 10:00:11 +02001472#ifdef CONFIG_SCHED_HRTICK
1473
1474/*
1475 * Use hrtick when:
1476 * - enabled by features
1477 * - hrtimer is actually high res
1478 */
1479static inline int hrtick_enabled(struct rq *rq)
1480{
1481 if (!sched_feat(HRTICK))
1482 return 0;
1483 if (!cpu_active(cpu_of(rq)))
1484 return 0;
1485 return hrtimer_is_hres_active(&rq->hrtick_timer);
1486}
1487
1488void hrtick_start(struct rq *rq, u64 delay);
1489
Mike Galbraithb39e66e2011-11-22 15:20:07 +01001490#else
1491
1492static inline int hrtick_enabled(struct rq *rq)
1493{
1494 return 0;
1495}
1496
Peter Zijlstra029632f2011-10-25 10:00:11 +02001497#endif /* CONFIG_SCHED_HRTICK */
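/*
 * Illustrative caller pattern (sketch only; 'remaining_slice_ns' is an assumed
 * value, cf. the hrtick users in fair.c):
 */
#if 0
	if (hrtick_enabled(rq))
		hrtick_start(rq, remaining_slice_ns);
#endif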
1498
1499#ifdef CONFIG_SMP
1500extern void sched_avg_update(struct rq *rq);
Peter Zijlstradfbca412015-03-23 14:19:05 +01001501
1502#ifndef arch_scale_freq_capacity
1503static __always_inline
1504unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1505{
1506 return SCHED_CAPACITY_SCALE;
1507}
1508#endif
Vincent Guittotb5b48602015-02-27 16:54:08 +01001509
Morten Rasmussen8cd56012015-08-14 17:23:10 +01001510#ifndef arch_scale_cpu_capacity
1511static __always_inline
1512unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1513{
Dietmar Eggemanne3279a22015-08-15 00:04:41 +01001514 if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
Morten Rasmussen8cd56012015-08-14 17:23:10 +01001515 return sd->smt_gain / sd->span_weight;
1516
1517 return SCHED_CAPACITY_SCALE;
1518}
1519#endif
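/*
 * Worked example for the fallback above: on an SMT-2 sibling domain with the
 * default smt_gain of 1178 (~15% above SCHED_CAPACITY_SCALE), each hardware
 * thread reports a capacity of 1178 / 2 = 589; in every other case the full
 * SCHED_CAPACITY_SCALE (1024) is reported.
 */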
1520
Peter Zijlstra029632f2011-10-25 10:00:11 +02001521static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1522{
Vincent Guittotb5b48602015-02-27 16:54:08 +01001523 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
Peter Zijlstra029632f2011-10-25 10:00:11 +02001524 sched_avg_update(rq);
1525}
1526#else
1527static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1528static inline void sched_avg_update(struct rq *rq) { }
1529#endif
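/*
 * Worked example for sched_rt_avg_update(): at full frequency, where
 * arch_scale_freq_capacity() returns SCHED_CAPACITY_SCALE (1024), a 1 ms RT
 * delta adds 1000000 * 1024 to rq->rt_avg; at half frequency (512) the same
 * wall-clock delta counts half as much, which keeps rt_avg frequency invariant.
 */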
1530
Peter Zijlstraeb580752015-07-31 21:28:18 +02001531struct rq_flags {
1532 unsigned long flags;
Peter Zijlstrae7904a22015-08-01 19:25:08 +02001533 struct pin_cookie cookie;
Peter Zijlstraeb580752015-07-31 21:28:18 +02001534};
1535
1536struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
Peter Zijlstra3e71a462016-04-28 16:16:33 +02001537 __acquires(rq->lock);
Peter Zijlstraeb580752015-07-31 21:28:18 +02001538struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001539 __acquires(p->pi_lock)
Peter Zijlstra3e71a462016-04-28 16:16:33 +02001540 __acquires(rq->lock);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001541
Peter Zijlstraeb580752015-07-31 21:28:18 +02001542static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001543 __releases(rq->lock)
1544{
Peter Zijlstrae7904a22015-08-01 19:25:08 +02001545 lockdep_unpin_lock(&rq->lock, rf->cookie);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001546 raw_spin_unlock(&rq->lock);
1547}
1548
1549static inline void
Peter Zijlstraeb580752015-07-31 21:28:18 +02001550task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001551 __releases(rq->lock)
1552 __releases(p->pi_lock)
1553{
Peter Zijlstrae7904a22015-08-01 19:25:08 +02001554 lockdep_unpin_lock(&rq->lock, rf->cookie);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001555 raw_spin_unlock(&rq->lock);
Peter Zijlstraeb580752015-07-31 21:28:18 +02001556 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001557}
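/*
 * Illustrative locking pattern (a minimal sketch; 'p' is an assumed task):
 * task_rq_lock() takes both p->pi_lock and the task's rq->lock, so the task
 * cannot migrate or change state under the caller until task_rq_unlock().
 */
#if 0
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	/* ... p is pinned to rq here ... */
	task_rq_unlock(rq, p, &rf);
#endif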
1558
Peter Zijlstra029632f2011-10-25 10:00:11 +02001559#ifdef CONFIG_SMP
1560#ifdef CONFIG_PREEMPT
1561
1562static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1563
1564/*
1565 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1566 * way at the expense of forcing extra atomic operations in all
1567 * invocations. This assures that the double_lock is acquired using the
1568 * same underlying policy as the spinlock_t on this architecture, which
1569 * reduces latency compared to the unfair variant below. However, it
1570 * also adds more overhead and therefore may reduce throughput.
1571 */
1572static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1573 __releases(this_rq->lock)
1574 __acquires(busiest->lock)
1575 __acquires(this_rq->lock)
1576{
1577 raw_spin_unlock(&this_rq->lock);
1578 double_rq_lock(this_rq, busiest);
1579
1580 return 1;
1581}
1582
1583#else
1584/*
1585 * Unfair double_lock_balance: Optimizes throughput at the expense of
1586 * latency by eliminating extra atomic operations when the locks are
1587 * already in proper order on entry. This favors lower cpu-ids and will
1588 * grant the double lock to lower cpus over higher ids under contention,
1589 * regardless of entry order into the function.
1590 */
1591static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1592 __releases(this_rq->lock)
1593 __acquires(busiest->lock)
1594 __acquires(this_rq->lock)
1595{
1596 int ret = 0;
1597
1598 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1599 if (busiest < this_rq) {
1600 raw_spin_unlock(&this_rq->lock);
1601 raw_spin_lock(&busiest->lock);
1602 raw_spin_lock_nested(&this_rq->lock,
1603 SINGLE_DEPTH_NESTING);
1604 ret = 1;
1605 } else
1606 raw_spin_lock_nested(&busiest->lock,
1607 SINGLE_DEPTH_NESTING);
1608 }
1609 return ret;
1610}
1611
1612#endif /* CONFIG_PREEMPT */
1613
1614/*
1615 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1616 */
1617static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1618{
1619 if (unlikely(!irqs_disabled())) {
1620 /* printk() doesn't work well under rq->lock */
1621 raw_spin_unlock(&this_rq->lock);
1622 BUG_ON(1);
1623 }
1624
1625 return _double_lock_balance(this_rq, busiest);
1626}
1627
1628static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1629 __releases(busiest->lock)
1630{
1631 raw_spin_unlock(&busiest->lock);
1632 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1633}
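/*
 * Illustrative balance-locking pattern (sketch only; 'this_rq' and 'busiest'
 * are assumed runqueues, cf. the push/pull paths in rt.c and deadline.c):
 * a non-zero return from double_lock_balance() means this_rq->lock was
 * dropped and re-acquired, so state derived from it must be re-checked.
 */
#if 0
	/* this_rq->lock is already held by the caller */
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was released in between; re-validate */
	}
	/* ... move tasks from busiest to this_rq ... */
	double_unlock_balance(this_rq, busiest);
#endif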
1634
Peter Zijlstra74602312013-10-10 20:17:22 +02001635static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1636{
1637 if (l1 > l2)
1638 swap(l1, l2);
1639
1640 spin_lock(l1);
1641 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1642}
1643
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001644static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1645{
1646 if (l1 > l2)
1647 swap(l1, l2);
1648
1649 spin_lock_irq(l1);
1650 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1651}
1652
Peter Zijlstra74602312013-10-10 20:17:22 +02001653static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1654{
1655 if (l1 > l2)
1656 swap(l1, l2);
1657
1658 raw_spin_lock(l1);
1659 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1660}
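/*
 * Example of why the pointer-ordering convention above avoids ABBA deadlocks
 * ('a' and 'b' are assumed objects embedding spinlock_t locks): both callers
 * end up acquiring the lower-addressed lock first, so neither can hold one
 * lock while waiting for the other.
 */
#if 0
	double_lock(&a->lock, &b->lock);	/* caller on CPU 0 */
	double_lock(&b->lock, &a->lock);	/* caller on CPU 1: same order */
#endif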
1661
Peter Zijlstra029632f2011-10-25 10:00:11 +02001662/*
1663 * double_rq_lock - safely lock two runqueues
1664 *
1665 * Note this does not disable interrupts like task_rq_lock,
1666 * you need to do so manually before calling.
1667 */
1668static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1669 __acquires(rq1->lock)
1670 __acquires(rq2->lock)
1671{
1672 BUG_ON(!irqs_disabled());
1673 if (rq1 == rq2) {
1674 raw_spin_lock(&rq1->lock);
1675 __acquire(rq2->lock); /* Fake it out ;) */
1676 } else {
1677 if (rq1 < rq2) {
1678 raw_spin_lock(&rq1->lock);
1679 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1680 } else {
1681 raw_spin_lock(&rq2->lock);
1682 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1683 }
1684 }
1685}
1686
1687/*
1688 * double_rq_unlock - safely unlock two runqueues
1689 *
1690 * Note this does not restore interrupts like task_rq_unlock,
1691 * you need to do so manually after calling.
1692 */
1693static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1694 __releases(rq1->lock)
1695 __releases(rq2->lock)
1696{
1697 raw_spin_unlock(&rq1->lock);
1698 if (rq1 != rq2)
1699 raw_spin_unlock(&rq2->lock);
1700 else
1701 __release(rq2->lock);
1702}
1703
1704#else /* CONFIG_SMP */
1705
1706/*
1707 * double_rq_lock - safely lock two runqueues
1708 *
1709 * Note this does not disable interrupts like task_rq_lock,
1710 * you need to do so manually before calling.
1711 */
1712static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1713 __acquires(rq1->lock)
1714 __acquires(rq2->lock)
1715{
1716 BUG_ON(!irqs_disabled());
1717 BUG_ON(rq1 != rq2);
1718 raw_spin_lock(&rq1->lock);
1719 __acquire(rq2->lock); /* Fake it out ;) */
1720}
1721
1722/*
1723 * double_rq_unlock - safely unlock two runqueues
1724 *
1725 * Note this does not restore interrupts like task_rq_unlock,
1726 * you need to do so manually after calling.
1727 */
1728static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1729 __releases(rq1->lock)
1730 __releases(rq2->lock)
1731{
1732 BUG_ON(rq1 != rq2);
1733 raw_spin_unlock(&rq1->lock);
1734 __release(rq2->lock);
1735}
1736
1737#endif
1738
1739extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1740extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
Srikar Dronamraju6b55c962015-06-25 22:51:41 +05301741
1742#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02001743extern void print_cfs_stats(struct seq_file *m, int cpu);
1744extern void print_rt_stats(struct seq_file *m, int cpu);
Wanpeng Liacb32132014-10-31 06:39:33 +08001745extern void print_dl_stats(struct seq_file *m, int cpu);
Srikar Dronamraju6b55c962015-06-25 22:51:41 +05301746extern void
1747print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
Srikar Dronamraju397f2372015-06-25 22:51:43 +05301748
1749#ifdef CONFIG_NUMA_BALANCING
1750extern void
1751show_numa_stats(struct task_struct *p, struct seq_file *m);
1752extern void
1753print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1754 unsigned long tpf, unsigned long gsf, unsigned long gpf);
1755#endif /* CONFIG_NUMA_BALANCING */
1756#endif /* CONFIG_SCHED_DEBUG */
Peter Zijlstra029632f2011-10-25 10:00:11 +02001757
1758extern void init_cfs_rq(struct cfs_rq *cfs_rq);
Abel Vesa07c54f72015-03-03 13:50:27 +02001759extern void init_rt_rq(struct rt_rq *rt_rq);
1760extern void init_dl_rq(struct dl_rq *dl_rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001761
Ben Segall1ee14e62013-10-16 11:16:12 -07001762extern void cfs_bandwidth_usage_inc(void);
1763extern void cfs_bandwidth_usage_dec(void);
Suresh Siddha1c792db2011-12-01 17:07:32 -08001764
Frederic Weisbecker3451d022011-08-10 23:21:01 +02001765#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08001766enum rq_nohz_flag_bits {
1767 NOHZ_TICK_STOPPED,
1768 NOHZ_BALANCE_KICK,
1769};
1770
1771#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
Thomas Gleixner20a5c8c2016-03-10 12:54:20 +01001772
1773extern void nohz_balance_exit_idle(unsigned int cpu);
1774#else
1775static inline void nohz_balance_exit_idle(unsigned int cpu) { }
Suresh Siddha1c792db2011-12-01 17:07:32 -08001776#endif
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001777
1778#ifdef CONFIG_IRQ_TIME_ACCOUNTING
Frederic Weisbecker19d23dbf2016-09-26 02:29:20 +02001779struct irqtime {
1780 u64 hardirq_time;
1781 u64 softirq_time;
1782 u64 irq_start_time;
1783 struct u64_stats_sync sync;
1784};
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001785
Frederic Weisbecker19d23dbf2016-09-26 02:29:20 +02001786DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001787
1788static inline u64 irq_time_read(int cpu)
1789{
Frederic Weisbecker19d23dbf2016-09-26 02:29:20 +02001790 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
1791 unsigned int seq;
1792 u64 total;
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001793
1794 do {
Frederic Weisbecker19d23dbf2016-09-26 02:29:20 +02001795 seq = __u64_stats_fetch_begin(&irqtime->sync);
1796 total = irqtime->softirq_time + irqtime->hardirq_time;
1797 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001798
Frederic Weisbecker19d23dbf2016-09-26 02:29:20 +02001799 return total;
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001800}
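/*
 * The matching writer side (a minimal sketch; the real accounting lives in
 * kernel/sched/cputime.c) brackets its updates with the same u64_stats
 * sequence, so 32-bit readers never observe a torn 64-bit value:
 */
#if 0
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);

	u64_stats_update_begin(&irqtime->sync);
	irqtime->hardirq_time += delta;		/* or ->softirq_time */
	u64_stats_update_end(&irqtime->sync);
#endif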
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001801#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001802
1803#ifdef CONFIG_CPU_FREQ
1804DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
1805
1806/**
1807 * cpufreq_update_util - Take a note about CPU utilization changes.
Rafael J. Wysocki12bde332016-08-10 03:11:17 +02001808 * @rq: Runqueue to carry out the update for.
Rafael J. Wysocki58919e82016-08-16 22:14:55 +02001809 * @flags: Update reason flags.
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001810 *
Rafael J. Wysocki58919e82016-08-16 22:14:55 +02001811 * This function is called by the scheduler on the CPU whose utilization is
1812 * being updated.
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001813 *
1814 * It can only be called from RCU-sched read-side critical sections.
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001815 *
1816 * The way cpufreq is currently arranged requires it to evaluate the CPU
1817 * performance state (frequency/voltage) on a regular basis to prevent it from
1818 * being stuck in a completely inadequate performance level for too long.
1819 * That is not guaranteed to happen if the updates are only triggered from CFS,
1820 * though, because those updates may stop arriving when RT or deadline tasks are
1821 * active all the time (or when only RT and DL tasks are runnable).
1822 *
1823 * As a workaround for that issue, this function is called by the RT and DL
1824 * sched classes to trigger extra cpufreq updates to prevent it from stalling,
1825 * but that really is a band-aid. Going forward it should be replaced with
1826 * solutions targeted more specifically at RT and DL tasks.
1827 */
Rafael J. Wysocki12bde332016-08-10 03:11:17 +02001828static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001829{
Rafael J. Wysocki58919e82016-08-16 22:14:55 +02001830 struct update_util_data *data;
1831
1832 data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
1833 if (data)
Rafael J. Wysocki12bde332016-08-10 03:11:17 +02001834 data->func(data, rq_clock(rq), flags);
1835}
1836
1837static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
1838{
1839 if (cpu_of(rq) == smp_processor_id())
1840 cpufreq_update_util(rq, flags);
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001841}
1842#else
Rafael J. Wysocki12bde332016-08-10 03:11:17 +02001843static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
1844static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
Rafael J. Wysockiadaf9fc2016-03-10 20:44:47 +01001845#endif /* CONFIG_CPU_FREQ */
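/*
 * Illustrative governor-side registration (a sketch only; the hook API is
 * implemented in kernel/sched/cpufreq.c and the 'example_*' names are made up):
 */
#if 0
static void example_util_hook(struct update_util_data *data, u64 time,
			      unsigned int flags)
{
	/* evaluate utilization and request a frequency change if warranted */
}

static DEFINE_PER_CPU(struct update_util_data, example_util_data);

static void example_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(example_util_data, cpu),
				     example_util_hook);
}
#endif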
Linus Torvaldsbe53f582016-03-24 09:42:50 -07001846
Rafael J. Wysocki9bdcb442016-04-02 01:09:12 +02001847#ifdef arch_scale_freq_capacity
1848#ifndef arch_scale_freq_invariant
1849#define arch_scale_freq_invariant() (true)
1850#endif
1851#else /* arch_scale_freq_capacity */
1852#define arch_scale_freq_invariant() (false)
1853#endif