#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq);

#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
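/*
 * Example (illustrative only): with HZ == 1000, NSEC_PER_SEC / HZ is
 * 1000000, so NS_TO_JIFFIES(5000000) evaluates to 5 jiffies.
 */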

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs of increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
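/*
 * Worked example (illustrative only): with the extra resolution enabled,
 * SCHED_LOAD_RESOLUTION is 10, so NICE_0_LOAD becomes 1 << 20 and
 * scale_load_down(NICE_0_LOAD) recovers the user-visible weight of 1024.
 * With the resolution disabled, as above, scale_load()/scale_load_down()
 * are identity operations and NICE_0_LOAD is simply 1024.
 */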

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
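/*
 * Illustrative note: dl_time_before() is a signed-difference comparison
 * (roughly "(s64)(a - b) < 0"), so the preemption test above stays valid
 * even when absolute deadline values wrap around u64.
 */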

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we can:
 * - store the maximum -deadline bandwidth of the system (the group);
 * - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 * - the dl_total_bw array contains, in the i-th element, the currently
 *   allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t		dl_runtime_lock;
	u64			dl_runtime;
	u64			dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
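/*
 * Worked example (illustrative, assuming bandwidths are kept in the
 * "runtime << 20 / period" fixed point produced by to_ratio()): with a
 * per-CPU limit dl_b->bw of ~95% (996147), cpus == 4 and total_bw of
 * 3500000, admitting a task with runtime 10ms every 100ms adds
 * new_bw ~= 104857; total_bw - old_bw + new_bw stays below bw * cpus
 * (3984588), so __dl_overflow() returns false and the task is admitted.
 */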

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);
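/*
 * Illustrative sketch (hypothetical visitor, not part of this header):
 * counting the groups in the hierarchy could use a @down callback like the
 * one below, with tg_nop as @up, under rcu_read_lock():
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &count);
 *	rcu_read_unlock();
 */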

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
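/*
 * Usage sketch (illustrative, names are placeholders): a scheduling class
 * that wants balancing work run once rq->lock is dropped can queue a
 * per-CPU callback_head while still holding the lock:
 *
 *	queue_balance_callback(rq, &per_cpu(my_balance_head, rq->cpu),
 *			       my_balance_func);
 *
 * The queued function is invoked later from balance_callback() in core.c.
 */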

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}
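/*
 * Example (illustrative): highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES)
 * yields the last-level-cache domain of @cpu; that is how the sd_llc
 * per-CPU pointers below are derived when the domains are (re)built.
 */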

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

extern struct static_key_false sched_numa_balancing;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
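/*
 * Example (illustrative): with the usual defaults of
 * sysctl_sched_rt_period == 1000000 (us) and sysctl_sched_rt_runtime ==
 * 950000 (us), these return 1s and 0.95s respectively, i.e. RT tasks may
 * consume at most 95% of every second; writing -1 to sched_rt_runtime_us
 * makes global_rt_runtime() return RUNTIME_INF (no throttling).
 */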

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the control dependency and rmb in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_HEAD		0x02
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		0x04	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0x00
#endif
#define ENQUEUE_REPLENISH	0x08
#define ENQUEUE_RESTORE		0x10

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif

#ifdef CONFIG_NO_HZ_FULL
		if (tick_nohz_full_cpu(rq->cpu)) {
			/*
			 * Tick is needed if more than one task runs on a CPU.
			 * Send the target an IPI to kick it out of nohz mode.
			 *
			 * We assume that IPI implies full memory barrier and the
			 * new value of rq->nr_running is visible on reception
			 * from the target.
			 */
			tick_nohz_full_kick_cpu(rq->cpu);
		}
#endif
	}
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
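/*
 * Example (illustrative): sysctl_sched_time_avg defaults to 1000 (ms), so
 * sched_avg_period() is normally 500ms; rq->rt_avg is aged in windows of
 * that length by sched_avg_update().
 */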

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif
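/*
 * Example (illustrative): on an SMT sched_domain that keeps the default
 * smt_gain of 1178 and has span_weight == 2, the fallback above reports a
 * per-thread capacity of 589, i.e. two hardware threads together are worth
 * slightly more than one full SCHED_CAPACITY_SCALE (1024) core.
 */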

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
1419
1420/*
1421 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
1422 */
1423static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1424 __acquires(p->pi_lock)
1425 __acquires(rq->lock)
1426{
1427 struct rq *rq;
1428
1429 for (;;) {
1430 raw_spin_lock_irqsave(&p->pi_lock, *flags);
1431 rq = task_rq(p);
1432 raw_spin_lock(&rq->lock);
1433 /*
1434 * move_queued_task() task_rq_lock()
1435 *
1436 * ACQUIRE (rq->lock)
1437 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
1438 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
1439 * [S] ->cpu = new_cpu [L] task_rq()
1440 * [L] ->on_rq
1441 * RELEASE (rq->lock)
1442 *
1443 * If we observe the old cpu in task_rq_lock, the acquire of
1444 * the old rq->lock will fully serialize against the stores.
1445 *
1446 * If we observe the new cpu in task_rq_lock, the acquire will
1447 * pair with the WMB to ensure we must then also see migrating.
1448 */
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001449 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1450 lockdep_pin_lock(&rq->lock);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001451 return rq;
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001452 }
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001453 raw_spin_unlock(&rq->lock);
1454 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1455
1456 while (unlikely(task_on_rq_migrating(p)))
1457 cpu_relax();
1458 }
1459}
1460
1461static inline void __task_rq_unlock(struct rq *rq)
1462 __releases(rq->lock)
1463{
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001464 lockdep_unpin_lock(&rq->lock);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001465 raw_spin_unlock(&rq->lock);
1466}
1467
1468static inline void
1469task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
1470 __releases(rq->lock)
1471 __releases(p->pi_lock)
1472{
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001473 lockdep_unpin_lock(&rq->lock);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001474 raw_spin_unlock(&rq->lock);
1475 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1476}
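#if 0	/* Usage sketch only: not part of this header. */
/*
 * The usual pairing for the helpers above. change_task_attr() and its body
 * are hypothetical; the point is that between task_rq_lock() and
 * task_rq_unlock() the task can neither switch runqueues nor be caught
 * half-way through a migration, since the loop above waits out
 * TASK_ON_RQ_MIGRATING before returning.
 */
static void change_task_attr(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);	/* p->pi_lock, then rq->lock */
	update_rq_clock(rq);
	/* ... dequeue p, modify its parameters, enqueue p ... */
	task_rq_unlock(rq, p, &flags);
}
#endif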
1477
Peter Zijlstra029632f2011-10-25 10:00:11 +02001478#ifdef CONFIG_SMP
1479#ifdef CONFIG_PREEMPT
1480
1481static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1482
1483/*
1484 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1485 * way at the expense of forcing extra atomic operations in all
1486 * invocations. This ensures that the double_lock is acquired using the
1487 * same underlying policy as the spinlock_t on this architecture, which
1488 * reduces latency compared to the unfair variant below. However, it
1489 * also adds more overhead and therefore may reduce throughput.
1490 */
1491static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1492 __releases(this_rq->lock)
1493 __acquires(busiest->lock)
1494 __acquires(this_rq->lock)
1495{
1496 raw_spin_unlock(&this_rq->lock);
1497 double_rq_lock(this_rq, busiest);
1498
1499 return 1;
1500}
1501
1502#else
1503/*
1504 * Unfair double_lock_balance: Optimizes throughput at the expense of
1505 * latency by eliminating extra atomic operations when the locks are
1506 * already in proper order on entry. This favors lower cpu-ids and will
1507 * grant the double lock to lower cpus over higher ids under contention,
1508 * regardless of entry order into the function.
1509 */
1510static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1511 __releases(this_rq->lock)
1512 __acquires(busiest->lock)
1513 __acquires(this_rq->lock)
1514{
1515 int ret = 0;
1516
1517 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1518 if (busiest < this_rq) {
1519 raw_spin_unlock(&this_rq->lock);
1520 raw_spin_lock(&busiest->lock);
1521 raw_spin_lock_nested(&this_rq->lock,
1522 SINGLE_DEPTH_NESTING);
1523 ret = 1;
1524 } else
1525 raw_spin_lock_nested(&busiest->lock,
1526 SINGLE_DEPTH_NESTING);
1527 }
1528 return ret;
1529}
1530
1531#endif /* CONFIG_PREEMPT */
1532
1533/*
1534 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1535 */
1536static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1537{
1538 if (unlikely(!irqs_disabled())) {
1539		/* printk() doesn't work well under rq->lock */
1540 raw_spin_unlock(&this_rq->lock);
1541 BUG_ON(1);
1542 }
1543
1544 return _double_lock_balance(this_rq, busiest);
1545}
1546
1547static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1548 __releases(busiest->lock)
1549{
1550 raw_spin_unlock(&busiest->lock);
1551 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1552}
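#if 0	/* Usage sketch only: not part of this header. */
/*
 * Roughly how a pull-side balance path uses double_lock_balance().
 * pull_one_task() and pick_pullable_task() are hypothetical names. Note
 * that _double_lock_balance() may drop this_rq->lock while acquiring both
 * locks, so anything observed before the call has to be re-validated
 * afterwards - hence the task_on_rq_queued() recheck.
 */
static int pull_one_task(struct rq *this_rq, struct rq *src_rq)
{
	struct task_struct *p;
	int moved = 0;

	/* caller already holds this_rq->lock, interrupts disabled */
	double_lock_balance(this_rq, src_rq);

	p = pick_pullable_task(src_rq, cpu_of(this_rq));
	if (p && task_on_rq_queued(p)) {
		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu_of(this_rq));
		activate_task(this_rq, p, 0);
		moved = 1;
	}

	double_unlock_balance(this_rq, src_rq);
	return moved;
}
#endif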
1553
Peter Zijlstra74602312013-10-10 20:17:22 +02001554static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
1555{
1556 if (l1 > l2)
1557 swap(l1, l2);
1558
1559 spin_lock(l1);
1560 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1561}
1562
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001563static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
1564{
1565 if (l1 > l2)
1566 swap(l1, l2);
1567
1568 spin_lock_irq(l1);
1569 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1570}
1571
Peter Zijlstra74602312013-10-10 20:17:22 +02001572static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
1573{
1574 if (l1 > l2)
1575 swap(l1, l2);
1576
1577 raw_spin_lock(l1);
1578 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
1579}
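#if 0	/* Usage sketch only: not part of this header. */
/*
 * Why the helpers above sort their arguments by address: two contexts that
 * name the same pair of locks in opposite order still acquire them in one
 * global order (lowest address first), so the classic ABBA deadlock cannot
 * occur. ctx_a() and ctx_b() are hypothetical.
 */
static void ctx_a(spinlock_t *x, spinlock_t *y)
{
	double_lock(x, y);		/* locks min(x, y) first */
	/* ... */
	spin_unlock(y);
	spin_unlock(x);
}

static void ctx_b(spinlock_t *x, spinlock_t *y)
{
	double_lock(y, x);		/* same acquisition order as ctx_a() */
	/* ... */
	spin_unlock(x);
	spin_unlock(y);
}
#endif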
1580
Peter Zijlstra029632f2011-10-25 10:00:11 +02001581/*
1582 * double_rq_lock - safely lock two runqueues
1583 *
1584 * Note this does not disable interrupts like task_rq_lock,
1585 * you need to do so manually before calling.
1586 */
1587static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1588 __acquires(rq1->lock)
1589 __acquires(rq2->lock)
1590{
1591 BUG_ON(!irqs_disabled());
1592 if (rq1 == rq2) {
1593 raw_spin_lock(&rq1->lock);
1594 __acquire(rq2->lock); /* Fake it out ;) */
1595 } else {
1596 if (rq1 < rq2) {
1597 raw_spin_lock(&rq1->lock);
1598 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1599 } else {
1600 raw_spin_lock(&rq2->lock);
1601 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1602 }
1603 }
1604}
1605
1606/*
1607 * double_rq_unlock - safely unlock two runqueues
1608 *
1609 * Note this does not restore interrupts like task_rq_unlock,
1610 * you need to do so manually after calling.
1611 */
1612static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1613 __releases(rq1->lock)
1614 __releases(rq2->lock)
1615{
1616 raw_spin_unlock(&rq1->lock);
1617 if (rq1 != rq2)
1618 raw_spin_unlock(&rq2->lock);
1619 else
1620 __release(rq2->lock);
1621}
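#if 0	/* Usage sketch only: not part of this header. */
/*
 * As the comment above says, double_rq_lock() does not disable interrupts
 * itself, so a caller that is not already running irq-disabled must do so
 * explicitly. migrate_between() is a hypothetical name.
 */
static void migrate_between(struct rq *src, struct rq *dst)
{
	local_irq_disable();
	double_rq_lock(src, dst);	/* would trip BUG_ON(!irqs_disabled()) otherwise */
	/* ... move work from src to dst ... */
	double_rq_unlock(src, dst);
	local_irq_enable();
}
#endif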
1622
1623#else /* CONFIG_SMP */
1624
1625/*
1626 * double_rq_lock - safely lock two runqueues
1627 *
1628 * Note this does not disable interrupts like task_rq_lock,
1629 * you need to do so manually before calling.
1630 */
1631static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1632 __acquires(rq1->lock)
1633 __acquires(rq2->lock)
1634{
1635 BUG_ON(!irqs_disabled());
1636 BUG_ON(rq1 != rq2);
1637 raw_spin_lock(&rq1->lock);
1638 __acquire(rq2->lock); /* Fake it out ;) */
1639}
1640
1641/*
1642 * double_rq_unlock - safely unlock two runqueues
1643 *
1644 * Note this does not restore interrupts like task_rq_unlock,
1645 * you need to do so manually after calling.
1646 */
1647static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1648 __releases(rq1->lock)
1649 __releases(rq2->lock)
1650{
1651 BUG_ON(rq1 != rq2);
1652 raw_spin_unlock(&rq1->lock);
1653 __release(rq2->lock);
1654}
1655
1656#endif
1657
1658extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1659extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
Srikar Dronamraju6b55c962015-06-25 22:51:41 +05301660
1661#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02001662extern void print_cfs_stats(struct seq_file *m, int cpu);
1663extern void print_rt_stats(struct seq_file *m, int cpu);
Wanpeng Liacb32132014-10-31 06:39:33 +08001664extern void print_dl_stats(struct seq_file *m, int cpu);
Srikar Dronamraju6b55c962015-06-25 22:51:41 +05301665extern void
1666print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
Srikar Dronamraju397f2372015-06-25 22:51:43 +05301667
1668#ifdef CONFIG_NUMA_BALANCING
1669extern void
1670show_numa_stats(struct task_struct *p, struct seq_file *m);
1671extern void
1672print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
1673 unsigned long tpf, unsigned long gsf, unsigned long gpf);
1674#endif /* CONFIG_NUMA_BALANCING */
1675#endif /* CONFIG_SCHED_DEBUG */
Peter Zijlstra029632f2011-10-25 10:00:11 +02001676
1677extern void init_cfs_rq(struct cfs_rq *cfs_rq);
Abel Vesa07c54f72015-03-03 13:50:27 +02001678extern void init_rt_rq(struct rt_rq *rt_rq);
1679extern void init_dl_rq(struct dl_rq *dl_rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001680
Ben Segall1ee14e62013-10-16 11:16:12 -07001681extern void cfs_bandwidth_usage_inc(void);
1682extern void cfs_bandwidth_usage_dec(void);
Suresh Siddha1c792db2011-12-01 17:07:32 -08001683
Frederic Weisbecker3451d022011-08-10 23:21:01 +02001684#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08001685enum rq_nohz_flag_bits {
1686 NOHZ_TICK_STOPPED,
1687 NOHZ_BALANCE_KICK,
1688};
1689
1690#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
1691#endif
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02001692
1693#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1694
1695DECLARE_PER_CPU(u64, cpu_hardirq_time);
1696DECLARE_PER_CPU(u64, cpu_softirq_time);
1697
1698#ifndef CONFIG_64BIT
1699DECLARE_PER_CPU(seqcount_t, irq_time_seq);
1700
1701static inline void irq_time_write_begin(void)
1702{
1703 __this_cpu_inc(irq_time_seq.sequence);
1704 smp_wmb();
1705}
1706
1707static inline void irq_time_write_end(void)
1708{
1709 smp_wmb();
1710 __this_cpu_inc(irq_time_seq.sequence);
1711}
1712
1713static inline u64 irq_time_read(int cpu)
1714{
1715 u64 irq_time;
1716 unsigned seq;
1717
1718 do {
1719 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1720 irq_time = per_cpu(cpu_softirq_time, cpu) +
1721 per_cpu(cpu_hardirq_time, cpu);
1722 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1723
1724 return irq_time;
1725}
1726#else /* CONFIG_64BIT */
1727static inline void irq_time_write_begin(void)
1728{
1729}
1730
1731static inline void irq_time_write_end(void)
1732{
1733}
1734
1735static inline u64 irq_time_read(int cpu)
1736{
1737 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1738}
1739#endif /* CONFIG_64BIT */
1740#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
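#if 0	/* Usage sketch only: not part of this header. */
/*
 * How the open-coded seqcount above is meant to be used on !CONFIG_64BIT,
 * where a u64 per-cpu counter cannot be read atomically. The writer
 * brackets its updates with irq_time_write_begin()/_end(); while the
 * sequence is odd, irq_time_read() keeps retrying, so it never returns a
 * torn cpu_hardirq_time + cpu_softirq_time sum. account_irq_delta() is a
 * hypothetical name for the accounting side.
 */
static void account_irq_delta(u64 delta, int hardirq)
{
	irq_time_write_begin();			/* sequence goes odd: readers retry */
	if (hardirq)
		__this_cpu_add(cpu_hardirq_time, delta);
	else
		__this_cpu_add(cpu_softirq_time, delta);
	irq_time_write_end();			/* sequence even again: readers may complete */
}
#endif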