#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq);

#ifdef CONFIG_SMP
extern void update_cpu_load_active(struct rq *this_rq);
#else
static inline void update_cpu_load_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

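/*
 * Illustrative note (not from this file): with the extra resolution enabled,
 * SCHED_LOAD_RESOLUTION is 10, so a nice-0 weight of 1024 would be carried
 * internally as scale_load(1024) == 1024 << 10 == 1048576, and
 * scale_load_down() shifts it back before the value is exposed to user space.
 * With the resolution disabled, as above, both macros are identity operations
 * and NICE_0_LOAD == SCHED_LOAD_SCALE == 1024.
 */
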
/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

static inline bool dl_time_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}

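/*
 * Illustrative examples (not part of the original source): the signed
 * subtraction in dl_time_before() keeps the comparison correct across
 * wraparound of the u64 clock:
 *
 *	dl_time_before(10, 20)			-> true, 10 is earlier
 *	dl_time_before(ULLONG_MAX - 5, 10)	-> true, because
 *		(s64)((ULLONG_MAX - 5) - 10) == -16, i.e. the first value is
 *		16 ticks "before" the second even though it is numerically
 *		larger.
 *
 * dl_entity_preempt() is therefore plain EDF ordering: @a preempts @b
 * whenever @a's absolute deadline is earlier in this wraparound-safe sense.
 */
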
/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
	unsigned int rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It in turn can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

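/*
 * Worked example (illustrative, assuming the default rt/dl sysctls of
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000): dl_b->bw
 * is then to_ratio(1s, 950ms), i.e. roughly 0.95 in to_ratio()'s fixed-point
 * scale.  On a root domain with cpus = 4 the test in __dl_overflow() becomes
 *
 *	0.95 * 4  <  total_bw - old_bw + new_bw
 *
 * (all terms in the same scale), so admitting a new -deadline task with
 * new_bw = to_ratio(period, runtime) fails once the summed utilization would
 * exceed 380% of a single CPU.  dl_b->bw == -1 means "no limit".
 */
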
extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef CONFIG_SMP
	atomic_long_t load_avg;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on that cfs_rq, so the weight of an entity should not be
 * too large; the same goes for the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL << 1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}

extern int tg_nop(struct task_group *tg, void *data);

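/*
 * Usage sketch (illustrative; tg_count is a hypothetical visitor): a
 * tg_visitor returns 0 to continue the walk and non-zero to abort it, e.g.
 * counting task groups:
 *
 *	static int tg_count(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count, tg_nop, &nr);
 *	rcu_read_unlock();
 *
 * tg_nop() is the provided no-op visitor for the direction that is not of
 * interest; the first non-zero return value stops the walk and is passed
 * back to the caller.
 */
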
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending runqueue-address order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ_COMMON
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

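/*
 * Usage sketch (illustrative; the per-cpu head and function names are
 * hypothetical): a scheduling class queues a balance callback while it still
 * holds rq->lock, typically through a per-rq callback_head:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_push_head);
 *
 *	// with rq->lock held:
 *	queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu),
 *			       my_push_tasks);
 *
 * The head->next check above makes queueing idempotent per head; the
 * scheduler core pops and invokes the queued callbacks later, once it is in
 * a context where taking other runqueue locks is safe.
 */
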
extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

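/*
 * Example (illustrative): the sd_llc pointer declared below is derived with
 * highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES), i.e. the largest domain
 * whose CPUs still share the last-level cache, while sd_numa is found with
 * lowest_flag_domain(cpu, SD_NUMA).  Note that highest_flag_domain() stops
 * at the first level missing the flag, so it assumes the flag is set on
 * every level below the one it returns.
 */
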
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_busy);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */

extern struct static_key_false sched_numa_balancing;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

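/*
 * Worked example (illustrative): two CPU-bound tasks at nice 0 and nice 5
 * have weights 1024 and 335, so the nice-5 task receives roughly
 * 335 / (1024 + 335) ~= 25% of the CPU.  Each nice step scales the weight by
 * about 1.25 (e.g. 1024 -> 820), which is what produces the "~10% per nice
 * level" behaviour described above.  The prio_to_wmult[] inverses let the
 * weighting of execution time be computed as roughly
 * (delta * 1024 * prio_to_wmult[prio]) >> 32 instead of a 64-bit division.
 */
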
#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif
#define ENQUEUE_REPLENISH	8

#define DEQUEUE_SLEEP		1

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);

	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
Li Zefanc82ba9f2013-03-05 16:06:55 +08001208 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1209 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1210 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1211 int oldprio);
1212
1213 unsigned int (*get_rr_interval) (struct rq *rq,
1214 struct task_struct *task);
1215
Stanislaw Gruszka6e998912014-11-12 16:58:44 +01001216 void (*update_curr) (struct rq *rq);
1217
Li Zefanc82ba9f2013-03-05 16:06:55 +08001218#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrabc54da22015-08-31 17:13:55 +02001219 void (*task_move_group) (struct task_struct *p);
Li Zefanc82ba9f2013-03-05 16:06:55 +08001220#endif
1221};
Peter Zijlstra029632f2011-10-25 10:00:11 +02001222
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01001223static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1224{
1225 prev->sched_class->put_prev_task(rq, prev);
1226}
1227
Peter Zijlstra029632f2011-10-25 10:00:11 +02001228#define sched_class_highest (&stop_sched_class)
1229#define for_each_class(class) \
1230 for (class = sched_class_highest; class; class = class->next)
1231
1232extern const struct sched_class stop_sched_class;
Dario Faggioliaab03e02013-11-28 11:14:43 +01001233extern const struct sched_class dl_sched_class;
Peter Zijlstra029632f2011-10-25 10:00:11 +02001234extern const struct sched_class rt_sched_class;
1235extern const struct sched_class fair_sched_class;
1236extern const struct sched_class idle_sched_class;
1237
1238
1239#ifdef CONFIG_SMP
1240
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04001241extern void update_group_capacity(struct sched_domain *sd, int cpu);
Li Zefanb7192032013-03-07 10:00:26 +08001242
Daniel Lezcano7caff662014-01-06 12:34:38 +01001243extern void trigger_load_balance(struct rq *rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001244
Vincent Guittot642dbc32013-04-18 18:34:26 +02001245extern void idle_enter_fair(struct rq *this_rq);
1246extern void idle_exit_fair(struct rq *this_rq);
Vincent Guittot642dbc32013-04-18 18:34:26 +02001247
Peter Zijlstrac5b28032015-05-15 17:43:35 +02001248extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1249
Peter Zijlstradc877342014-02-12 15:47:29 +01001250#else
1251
1252static inline void idle_enter_fair(struct rq *rq) { }
1253static inline void idle_exit_fair(struct rq *rq) { }
1254
Peter Zijlstra029632f2011-10-25 10:00:11 +02001255#endif
1256
Daniel Lezcano442bf3a2014-09-04 11:32:09 -04001257#ifdef CONFIG_CPU_IDLE
1258static inline void idle_set_state(struct rq *rq,
1259 struct cpuidle_state *idle_state)
1260{
1261 rq->idle_state = idle_state;
1262}
1263
1264static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1265{
1266 WARN_ON(!rcu_read_lock_held());
1267 return rq->idle_state;
1268}
1269#else
1270static inline void idle_set_state(struct rq *rq,
1271 struct cpuidle_state *idle_state)
1272{
1273}
1274
1275static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1276{
1277 return NULL;
1278}
1279#endif
1280
Peter Zijlstra029632f2011-10-25 10:00:11 +02001281extern void sysrq_sched_debug_show(void);
1282extern void sched_init_granularity(void);
1283extern void update_max_interval(void);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001284
1285extern void init_sched_dl_class(void);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001286extern void init_sched_rt_class(void);
1287extern void init_sched_fair_class(void);
1288
Kirill Tkhai88751252014-06-29 00:03:57 +04001289extern void resched_curr(struct rq *rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001290extern void resched_cpu(int cpu);
1291
1292extern struct rt_bandwidth def_rt_bandwidth;
1293extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1294
Dario Faggioli332ac172013-11-07 14:43:45 +01001295extern struct dl_bandwidth def_dl_bandwidth;
1296extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
Dario Faggioliaab03e02013-11-28 11:14:43 +01001297extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1298
Dario Faggioli332ac172013-11-07 14:43:45 +01001299unsigned long to_ratio(u64 period, u64 runtime);
1300
Yuyang Du540247f2015-07-15 08:04:39 +08001301extern void init_entity_runnable_average(struct sched_entity *se);
Alex Shia75cdaa2013-06-20 10:18:47 +08001302
Kirill Tkhai72465442014-05-09 03:00:14 +04001303static inline void add_nr_running(struct rq *rq, unsigned count)
Peter Zijlstra029632f2011-10-25 10:00:11 +02001304{
Kirill Tkhai72465442014-05-09 03:00:14 +04001305 unsigned prev_nr = rq->nr_running;
1306
1307 rq->nr_running = prev_nr + count;
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001308
Kirill Tkhai72465442014-05-09 03:00:14 +04001309 if (prev_nr < 2 && rq->nr_running >= 2) {
Tim Chen4486edd2014-06-23 12:16:49 -07001310#ifdef CONFIG_SMP
1311 if (!rq->rd->overload)
1312 rq->rd->overload = true;
1313#endif
1314
1315#ifdef CONFIG_NO_HZ_FULL
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001316 if (tick_nohz_full_cpu(rq->cpu)) {
Frederic Weisbecker3882ec62014-03-18 22:54:04 +01001317 /*
1318 * Tick is needed if more than one task runs on a CPU.
1319 * Send the target an IPI to kick it out of nohz mode.
1320 *
1321 * We assume that IPI implies full memory barrier and the
1322 * new value of rq->nr_running is visible on reception
1323 * from the target.
1324 */
Frederic Weisbeckerfd2ac4f2014-03-18 21:12:53 +01001325 tick_nohz_full_kick_cpu(rq->cpu);
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001326 }
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001327#endif
Tim Chen4486edd2014-06-23 12:16:49 -07001328 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02001329}
1330
Kirill Tkhai72465442014-05-09 03:00:14 +04001331static inline void sub_nr_running(struct rq *rq, unsigned count)
Peter Zijlstra029632f2011-10-25 10:00:11 +02001332{
Kirill Tkhai72465442014-05-09 03:00:14 +04001333 rq->nr_running -= count;
Peter Zijlstra029632f2011-10-25 10:00:11 +02001334}
1335
Frederic Weisbecker265f22a2013-05-03 03:39:05 +02001336static inline void rq_last_tick_reset(struct rq *rq)
1337{
1338#ifdef CONFIG_NO_HZ_FULL
1339 rq->last_sched_tick = jiffies;
1340#endif
1341}
1342
Peter Zijlstra029632f2011-10-25 10:00:11 +02001343extern void update_rq_clock(struct rq *rq);
1344
1345extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1346extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1347
1348extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1349
1350extern const_debug unsigned int sysctl_sched_time_avg;
1351extern const_debug unsigned int sysctl_sched_nr_migrate;
1352extern const_debug unsigned int sysctl_sched_migration_cost;
1353
1354static inline u64 sched_avg_period(void)
1355{
1356 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1357}
1358
Peter Zijlstra029632f2011-10-25 10:00:11 +02001359#ifdef CONFIG_SCHED_HRTICK
1360
1361/*
1362 * Use hrtick when:
1363 * - enabled by features
1364 * - hrtimer is actually high res
1365 */
1366static inline int hrtick_enabled(struct rq *rq)
1367{
1368 if (!sched_feat(HRTICK))
1369 return 0;
1370 if (!cpu_active(cpu_of(rq)))
1371 return 0;
1372 return hrtimer_is_hres_active(&rq->hrtick_timer);
1373}
1374
1375void hrtick_start(struct rq *rq, u64 delay);
1376
Mike Galbraithb39e66e2011-11-22 15:20:07 +01001377#else
1378
1379static inline int hrtick_enabled(struct rq *rq)
1380{
1381 return 0;
1382}
1383
Peter Zijlstra029632f2011-10-25 10:00:11 +02001384#endif /* CONFIG_SCHED_HRTICK */
1385
1386#ifdef CONFIG_SMP
1387extern void sched_avg_update(struct rq *rq);
Peter Zijlstradfbca412015-03-23 14:19:05 +01001388
1389#ifndef arch_scale_freq_capacity
1390static __always_inline
1391unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1392{
1393 return SCHED_CAPACITY_SCALE;
1394}
1395#endif
Vincent Guittotb5b48602015-02-27 16:54:08 +01001396
Morten Rasmussen8cd56012015-08-14 17:23:10 +01001397#ifndef arch_scale_cpu_capacity
1398static __always_inline
1399unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1400{
Dietmar Eggemanne3279a22015-08-15 00:04:41 +01001401 if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
Morten Rasmussen8cd56012015-08-14 17:23:10 +01001402 return sd->smt_gain / sd->span_weight;
1403
1404 return SCHED_CAPACITY_SCALE;
1405}
1406#endif
1407
Peter Zijlstra029632f2011-10-25 10:00:11 +02001408static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1409{
Vincent Guittotb5b48602015-02-27 16:54:08 +01001410 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
Peter Zijlstra029632f2011-10-25 10:00:11 +02001411 sched_avg_update(rq);
1412}
1413#else
1414static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1415static inline void sched_avg_update(struct rq *rq) { }
1416#endif
1417
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001418/*
1419 * __task_rq_lock - lock the rq @p resides on.
1420 */
1421static inline struct rq *__task_rq_lock(struct task_struct *p)
1422 __acquires(rq->lock)
1423{
1424 struct rq *rq;
1425
1426 lockdep_assert_held(&p->pi_lock);
1427
1428 for (;;) {
1429 rq = task_rq(p);
1430 raw_spin_lock(&rq->lock);
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001431 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1432 lockdep_pin_lock(&rq->lock);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001433 return rq;
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001434 }
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001435 raw_spin_unlock(&rq->lock);
1436
1437 while (unlikely(task_on_rq_migrating(p)))
1438 cpu_relax();
1439 }
1440}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                /*
                 *	move_queued_task()		task_rq_lock()
                 *
                 *	ACQUIRE (rq->lock)
                 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
                 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
                 *	[S] ->cpu = new_cpu		[L] task_rq()
                 *					[L] ->on_rq
                 *	RELEASE (rq->lock)
                 *
                 * If we observe the old cpu in task_rq_lock, the acquire of
                 * the old rq->lock will fully serialize against the stores.
                 *
                 * If we observe the new cpu in task_rq_lock, the acquire will
                 * pair with the WMB to ensure we must then also see migrating.
                 */
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        lockdep_pin_lock(&rq->lock);
                        return rq;
                }
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
        }
}

static inline void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
        __releases(rq->lock)
        __releases(p->pi_lock)
{
        lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
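
/*
 * Usage sketch for the combined variant (illustrative only): task_rq_lock()
 * disables interrupts, takes p->pi_lock and then the runqueue lock, so the
 * task can neither change runqueues nor have its scheduling properties
 * modified concurrently until task_rq_unlock() releases both:
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p is stable here ...
 *	task_rq_unlock(rq, p, &flags);
 */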

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
{
        raw_spin_unlock(&this_rq->lock);
        double_rq_lock(this_rq, busiest);

        return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
{
        int ret = 0;

        if (unlikely(!raw_spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
                        raw_spin_unlock(&this_rq->lock);
                        raw_spin_lock(&busiest->lock);
                        raw_spin_lock_nested(&this_rq->lock,
                                             SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
                        raw_spin_lock_nested(&busiest->lock,
                                             SINGLE_DEPTH_NESTING);
        }
        return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
        if (unlikely(!irqs_disabled())) {
                /* printk() doesn't work well under rq->lock */
                raw_spin_unlock(&this_rq->lock);
                BUG_ON(1);
        }

        return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
{
        raw_spin_unlock(&busiest->lock);
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
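
/*
 * Typical balancing pattern, shown only as a sketch: the caller holds
 * this_rq->lock with interrupts disabled and wants the busiest runqueue's
 * lock as well.  A non-zero return from double_lock_balance() means
 * this_rq->lock was dropped and re-acquired to take the locks in the
 * proper order, so any state derived from this_rq must be re-validated:
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		... re-check this_rq state ...
 *	... pull tasks from busiest to this_rq ...
 *	double_unlock_balance(this_rq, busiest);
 */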

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
        if (l1 > l2)
                swap(l1, l2);

        spin_lock(l1);
        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
        if (l1 > l2)
                swap(l1, l2);

        spin_lock_irq(l1);
        spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
        if (l1 > l2)
                swap(l1, l2);

        raw_spin_lock(l1);
        raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
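
/*
 * All three helpers above avoid ABBA deadlocks the same way: the two locks
 * are always taken in ascending address order, regardless of the argument
 * order, so two cpus locking the same pair can never end up waiting on each
 * other.  The *_nested() annotation on the second acquisition only tells
 * lockdep that taking two locks of the same class here is intentional.
 */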

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
{
        BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
                raw_spin_lock(&rq1->lock);
                __acquire(rq2->lock);	/* Fake it out ;) */
        } else {
                if (rq1 < rq2) {
                        raw_spin_lock(&rq1->lock);
                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
                        raw_spin_lock(&rq2->lock);
                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
{
        raw_spin_unlock(&rq1->lock);
        if (rq1 != rq2)
                raw_spin_unlock(&rq2->lock);
        else
                __release(rq2->lock);
}
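
/*
 * Caller sketch (illustrative only; src_rq/dst_rq are placeholder names):
 * interrupts must already be disabled, and the runqueues may be passed in
 * either order since double_rq_lock() orders the acquisitions by itself:
 *
 *	local_irq_disable();
 *	double_rq_lock(src_rq, dst_rq);
 *	... move state between the two runqueues ...
 *	double_rq_unlock(src_rq, dst_rq);
 *	local_irq_enable();
 */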

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
{
        BUG_ON(!irqs_disabled());
        BUG_ON(rq1 != rq2);
        raw_spin_lock(&rq1->lock);
        __acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
{
        BUG_ON(rq1 != rq2);
        raw_spin_unlock(&rq1->lock);
        __release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
        NOHZ_TICK_STOPPED,
        NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif
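
/*
 * Roughly: NOHZ_TICK_STOPPED is set once a cpu enters nohz idle and stops
 * its periodic tick; NOHZ_BALANCE_KICK marks a cpu that has been asked
 * (via IPI) to run idle load balancing on behalf of the other idle,
 * tickless cpus.
 */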

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
        __this_cpu_inc(irq_time_seq.sequence);
        smp_wmb();
}

static inline void irq_time_write_end(void)
{
        smp_wmb();
        __this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
        u64 irq_time;
        unsigned seq;

        do {
                seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
                irq_time = per_cpu(cpu_softirq_time, cpu) +
                           per_cpu(cpu_hardirq_time, cpu);
        } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

        return irq_time;
}
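
/*
 * On 32-bit the two u64 counters cannot be read atomically, so the helpers
 * above protect them with a per-cpu sequence count: the writer bumps the
 * sequence (making it odd) before updating the counters and bumps it again
 * afterwards, while irq_time_read() retries until it observes an even,
 * unchanged sequence.  A writer-side sketch, illustrative only (the real
 * update lives with the irq time accounting code):
 *
 *	irq_time_write_begin();
 *	__this_cpu_add(cpu_hardirq_time, delta);
 *	irq_time_write_end();
 */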
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
        return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */