
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

#include "cpupri.h"
#include "cpudeadline.h"
#include "cpuacct.h"

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)	((void)(x))
#endif

struct rq;
struct cpuidle_state;

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

extern __read_mostly int scheduler_running;

extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;

extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);

#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
#endif

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
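/*
 * Worked example (illustrative only): with HZ == 250 a jiffy is 4ms, so
 * converting a 10ms budget works out as
 *
 *	NS_TO_JIFFIES(10 * NSEC_PER_MSEC)
 *		== 10000000 / (1000000000 / 250) == 2 jiffies
 *
 * i.e. the integer division rounds down to whole jiffies.
 */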

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
 * pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
 * increase coverage and consistency always enable it on 64bit platforms.
 */
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
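/*
 * Worked example (illustrative): SCHED_FIXEDPOINT_SHIFT is 10, so on
 * 64bit NICE_0_LOAD is 1 << 20. A nice-0 task has user weight 1024 and
 *
 *	scale_load(1024) == 1024 << 10 == 1048576 == NICE_0_LOAD
 *	scale_load_down(scale_load(w)) == w
 *
 * while on 32bit both conversions are the identity and NICE_0_LOAD is
 * simply 1024.
 */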

/*
 * Single value that decides SCHED_DEADLINE internal math precision.
 * 10 -> just above 1us
 * 9  -> just above 0.5us
 */
#define DL_SCALE (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 */

/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF ((u64)~0ULL)

static inline int idle_policy(int policy)
{
	return policy == SCHED_IDLE;
}
static inline int fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static inline int rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static inline int dl_policy(int policy)
{
	return policy == SCHED_DEADLINE;
}
static inline bool valid_policy(int policy)
{
	return idle_policy(policy) || fair_policy(policy) ||
		rt_policy(policy) || dl_policy(policy);
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

static inline int task_has_dl_policy(struct task_struct *p)
{
	return dl_policy(p->policy);
}

/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
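/*
 * Example (illustrative): if entity @a has an absolute deadline of
 * 150us and @b one of 200us (both tracked in ns internally),
 * dl_entity_preempt(a, b) is true: earliest-deadline-first means @a
 * runs before @b regardless of their runtimes or periods.
 */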

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
	unsigned int		rt_period_active;
};

void __dl_clear_params(struct task_struct *p);

/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where:
 *  - we can store the maximum -deadline bandwidth of the system (the group);
 *  - we can cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - the dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu caches the index of dl_total_bw that will
 * be shown the next time the proc or cgroup controls are read. It in
 * turn can be changed by writing to its own control.
 */
struct dl_bandwidth {
	raw_spinlock_t dl_runtime_lock;
	u64 dl_runtime;
	u64 dl_period;
};

static inline int dl_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

extern struct dl_bw *dl_bw_of(int i);

struct dl_bw {
	raw_spinlock_t lock;
	u64 bw, total_bw;
};

static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw -= tsk_bw;
}

static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
	dl_b->total_bw += tsk_bw;
}

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
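/*
 * Admission-control sketch (illustrative; the numbers are examples
 * only): bandwidths are runtime/period ratios scaled by 2^20 (see
 * to_ratio() below). With a 95% per-CPU cap (dl_b->bw ~= 996147), a
 * 4-CPU root domain that has already allocated
 * dl_b->total_bw == 3 << 20, and a new task wanting 10ms every 100ms
 * (new_bw ~= 104857):
 *
 *	__dl_overflow(dl_b, 4, 0, new_bw)
 *		== (996147 * 4 < 3145728 + 104857) == false
 *
 * so the task fits and __dl_add(dl_b, new_bw) charges its bandwidth.
 */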

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

extern struct list_head task_groups;

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchical_quota;
	u64 runtime_expires;

	int idle, period_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

#ifdef	CONFIG_SMP
	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */
	atomic_long_t load_avg ____cacheline_aligned;
#endif
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
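/*
 * Usage sketch (illustrative; count_tg is hypothetical): a visitor
 * matching tg_visitor that counts groups, walked under rcu_read_lock().
 * A non-zero return value from @down aborts the walk early.
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr);
 *	rcu_read_unlock();
 */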

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void online_fair_sched_group(struct task_group *tg);
extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_online_group(struct task_group *tg,
			       struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_offline_group(struct task_group *tg);

extern void sched_move_task(struct task_struct *tsk);

#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

#ifdef CONFIG_SMP
extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
				    struct cfs_rq *prev, struct cfs_rq *next) { }
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned int nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none is currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef	CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_SMP
	/*
	 * CFS load tracking
	 */
	struct sched_avg avg;
	u64 runnable_load_sum;
	unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
	unsigned long tg_load_avg_contrib;
#endif
	atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
	u64 load_last_update_time_copy;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;
	u64 last_h_load_update;
	struct sched_entity *h_load_next;
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_clock, throttled_clock_task;
	u64 throttled_clock_task_time;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned int rt_nr_running;
	unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
	int push_flags;
	int push_cpu;
	struct irq_work push_work;
	raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
	int rt_queued;

	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct task_group *tg;
#endif
};

/* Deadline class' related fields in a runqueue */
struct dl_rq {
	/* runqueue is an rbtree, ordered by deadline */
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;

	unsigned long dl_nr_running;

#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
	struct {
		u64 curr;
		u64 next;
	} earliest_dl;

	unsigned long dl_nr_migratory;
	int overloaded;

	/*
	 * Tasks on this rq that can be pushed away. They are kept in
	 * an rb-tree, ordered by tasks' deadlines, with caching
	 * of the leftmost (earliest deadline) element.
	 */
	struct rb_root pushable_dl_tasks_root;
	struct rb_node *pushable_dl_tasks_leftmost;
#else
	struct dl_bw dl_bw;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/* Indicate more than one runnable task for any CPU */
	bool overload;

	/*
	 * The bit corresponding to a CPU gets set here if such CPU has more
	 * than one runnable -deadline task (as it is below for RT tasks).
	 */
	cpumask_var_t dlo_mask;
	atomic_t dlo_count;
	struct dl_bw dl_bw;
	struct cpudl cpudl;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;

	unsigned long max_cpu_capacity;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: in those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending runqueue address.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned int nr_running;
#ifdef CONFIG_NUMA_BALANCING
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
#endif
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ_COMMON
#ifdef CONFIG_SMP
	unsigned long last_load_update_tick;
#endif /* CONFIG_SMP */
	unsigned long nohz_flags;
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
	unsigned long last_sched_tick;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;
	struct dl_rq dl;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif /* CONFIG_FAIR_GROUP_SCHED */

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	unsigned int clock_skip_update;
	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_capacity;
	unsigned long cpu_capacity_orig;

	struct callback_head *balance_callback;

	unsigned char idle_balance;
	/* For active balancing */
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	struct list_head cfs_tasks;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;

	/* This is used to determine avg_idle's max value */
	u64 max_idle_balance_cost;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif

#ifdef CONFIG_CPU_IDLE
	/* Must be inspected within a rcu lock section */
	struct cpuidle_state *idle_state;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}


#ifdef CONFIG_SCHED_SMT

extern struct static_key_false sched_smt_present;

extern void __update_idle_core(struct rq *rq);

static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}

#else
static inline void update_idle_core(struct rq *rq) { }
#endif

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		this_cpu_ptr(&runqueues)
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		raw_cpu_ptr(&runqueues)

static inline u64 __rq_clock_broken(struct rq *rq)
{
	return READ_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock;
}

static inline u64 rq_clock_task(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	return rq->clock_task;
}

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
	lockdep_assert_held(&rq->lock);
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

#ifdef CONFIG_NUMA
enum numa_topology_type {
	NUMA_DIRECT,
	NUMA_GLUELESS_MESH,
	NUMA_BACKPLANE,
};
extern enum numa_topology_type sched_numa_topology_type;
extern int sched_max_numa_distance;
extern bool find_numa_distance(int distance);
#endif

#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
	NUMA_MEM = 0,
	NUMA_CPU,
	NUMA_MEMBUF,
	NUMA_CPUBUF
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
extern int migrate_swap(struct task_struct *, struct task_struct *);
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_SMP

static inline void
queue_balance_callback(struct rq *rq,
		       struct callback_head *head,
		       void (*func)(struct rq *rq))
{
	lockdep_assert_held(&rq->lock);

	if (unlikely(head->next))
		return;

	head->func = (void (*)(struct callback_head *))func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}
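/*
 * Usage sketch (illustrative; my_balance_head and my_balance_func are
 * hypothetical): work that must run after rq->lock is released is
 * queued from within the scheduler and invoked later via the
 * balance_callback list, e.g. with rq->lock held:
 *
 *	static DEFINE_PER_CPU(struct callback_head, my_balance_head);
 *
 *	queue_balance_callback(rq, &per_cpu(my_balance_head, rq->cpu),
 *			       my_balance_func);
 *
 * The head->next check above makes requeueing a still-pending callback
 * a no-op.
 */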

extern void sched_ttwu_pending(void);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
			__sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

/**
 * highest_flag_domain - Return highest sched_domain containing flag.
 * @cpu:	The cpu whose highest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the highest sched_domain
 *		for the given cpu.
 *
 * Returns the highest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd, *hsd = NULL;

	for_each_domain(cpu, sd) {
		if (!(sd->flags & flag))
			break;
		hsd = sd;
	}

	return hsd;
}
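/*
 * Example (illustrative): sd_llc below is initialized essentially as
 *
 *	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 * i.e. the widest domain whose CPUs all share a last-level cache. The
 * walk stops at the first level missing the flag, so the flag must be
 * set bottom-up for the result to be meaningful.
 */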

static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		if (sd->flags & flag)
			break;
	}

	return sd;
}

DECLARE_PER_CPU(struct sched_domain *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);

struct sched_group_capacity {
	atomic_t ref;
	/*
	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
	 * for a single CPU.
	 */
	unsigned int capacity;
	unsigned long next_update;
	int imbalance; /* XXX unrelated to capacity but shared group state */

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_capacity *sgc;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the domain
 * tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgc->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

extern int group_balance_cpu(struct sched_group *sg);

#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
void register_sched_domain_sysctl(void);
void unregister_sched_domain_sysctl(void);
#else
static inline void register_sched_domain_sysctl(void)
{
}
static inline void unregister_sched_domain_sysctl(void)
{
}
#endif

#else

static inline void sched_ttwu_pending(void) { }

#endif /* CONFIG_SMP */

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
#ifdef CONFIG_THREAD_INFO_IN_TASK
	p->cpu = cpu;
#else
	task_thread_info(p)->cpu = cpu;
#endif
	p->wake_cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/static_key.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct static_key *key) \
{									\
	return static_key_##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
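/*
 * Usage sketch (illustrative): features from features.h are tested with
 * sched_feat(), which compiles to a static branch when jump labels are
 * available and to a bitmask test otherwise, e.g.
 *
 *	if (sched_feat(HRTICK))
 *		hrtick_start(rq, delay);
 *
 * With SCHED_DEBUG the mask is runtime-writable via
 * /sys/kernel/debug/sched_features; without it the flags are constant.
 */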

extern struct static_key_false sched_numa_balancing;
extern struct static_key_false sched_schedstats;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
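/*
 * Example (illustrative): with the default sysctls
 * sched_rt_period_us == 1000000 and sched_rt_runtime_us == 950000,
 * global_rt_period() is 1s and global_rt_runtime() 0.95s in ns, i.e.
 * realtime classes may consume at most 95% of each 1s period; setting
 * sched_rt_runtime_us to -1 yields RUNTIME_INF (no throttling).
 */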

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

static inline int task_on_rq_migrating(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_MIGRATING;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch()	do { } while (0)
#endif

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 *
	 * In particular, the load of prev->state in finish_task_switch() must
	 * happen before this.
	 *
	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
	 */
	smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x4		/* internal use, task got migrated */

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO		3
#define WMULT_IDLEPRIO		1431655765

extern const int sched_prio_to_weight[40];
extern const u32 sched_prio_to_wmult[40];

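/*
 * Example (illustrative): sched_prio_to_weight[] maps nice levels to
 * weights with nice 0 == 1024 and roughly a 1.25x step per nice level,
 * so a nice -5 task weighs about 3121 and a nice +5 task about 335;
 * each level thus gains or cedes roughly 10% relative CPU time.
 */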
/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                are in a known state which allows modification. Such pairs
 *                should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
 *        in the runqueue.
 *
 * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 *
 */

#define DEQUEUE_SLEEP		0x01
#define DEQUEUE_SAVE		0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE		0x04 /* matches ENQUEUE_MOVE */

#define ENQUEUE_WAKEUP		0x01
#define ENQUEUE_RESTORE		0x02
#define ENQUEUE_MOVE		0x04

#define ENQUEUE_HEAD		0x08
#define ENQUEUE_REPLENISH	0x10
#ifdef CONFIG_SMP
#define ENQUEUE_MIGRATED	0x20
#else
#define ENQUEUE_MIGRATED	0x00
#endif

#define RETRY_TASK		((void *)-1UL)

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	/*
	 * It is the responsibility of the pick_next_task() method that will
	 * return the next task to call put_prev_task() on the @prev task or
	 * something equivalent.
	 *
	 * May return RETRY_TASK when it finds a higher prio class has runnable
	 * tasks.
	 */
	struct task_struct * (*pick_next_task) (struct rq *rq,
						struct task_struct *prev,
						struct pin_cookie cookie);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
	void (*migrate_task_rq)(struct task_struct *p);

	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);
	void (*task_dead) (struct task_struct *p);

	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are however serialized by p->pi_lock.
	 */
	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

	void (*update_curr) (struct rq *rq);

#define TASK_SET_GROUP  0
#define TASK_MOVE_GROUP	1

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_change_group) (struct task_struct *p, int type);
#endif
};

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	prev->sched_class->put_prev_task(rq, prev);
}

static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
{
	curr->sched_class->set_curr_task(rq);
}

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)
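/*
 * Iteration sketch (illustrative): the core pick loop walks the classes
 * in priority order (stop, dl, rt, fair, idle) until one returns a
 * task, roughly:
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev, cookie);
 *		if (p)
 *			return p;
 *	}
 *
 * (the real loop additionally handles the RETRY_TASK return value)
 */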

extern const struct sched_class stop_sched_class;
extern const struct sched_class dl_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;


#ifdef CONFIG_SMP

extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);

#endif

#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
	rq->idle_state = idle_state;
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	SCHED_WARN_ON(!rcu_read_lock_held());
	return rq->idle_state;
}
#else
static inline void idle_set_state(struct rq *rq,
				  struct cpuidle_state *idle_state)
{
}

static inline struct cpuidle_state *idle_get_state(struct rq *rq)
{
	return NULL;
}
#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);

extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
extern void init_dl_task_timer(struct sched_dl_entity *dl_se);

unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct sched_entity *se);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);

/*
 * The tick may be needed by tasks in the runqueue depending on their policy
 * and requirements. If the tick is needed, let's send the target an IPI to
 * kick it out of nohz mode if necessary.
 */
static inline void sched_update_tick_dependency(struct rq *rq)
{
	int cpu;

	if (!tick_nohz_full_enabled())
		return;

	cpu = cpu_of(rq);

	if (!tick_nohz_full_cpu(cpu))
		return;

	if (sched_can_stop_tick(rq))
		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
	else
		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
#endif

static inline void add_nr_running(struct rq *rq, unsigned count)
{
	unsigned prev_nr = rq->nr_running;

	rq->nr_running = prev_nr + count;

	if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
		if (!rq->rd->overload)
			rq->rd->overload = true;
#endif
	}

	sched_update_tick_dependency(rq);
}

static inline void sub_nr_running(struct rq *rq, unsigned count)
{
	rq->nr_running -= count;
	/* Check if we still need preemption */
	sched_update_tick_dependency(rq);
}

static inline void rq_last_tick_reset(struct rq *rq)
{
#ifdef CONFIG_NO_HZ_FULL
	rq->last_sched_tick = jiffies;
#endif
}

Peter Zijlstra029632f2011-10-25 10:00:11 +02001420extern void update_rq_clock(struct rq *rq);
1421
1422extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1423extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1424
1425extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1426
1427extern const_debug unsigned int sysctl_sched_time_avg;
1428extern const_debug unsigned int sysctl_sched_nr_migrate;
1429extern const_debug unsigned int sysctl_sched_migration_cost;
1430
1431static inline u64 sched_avg_period(void)
1432{
1433 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1434}
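
/*
 * For example, with sysctl_sched_time_avg at 1000 (ms, assuming the shipped
 * default of kernel.sched_time_avg_ms), sched_avg_period() is 500 ms worth of
 * nanoseconds: half the averaging window used to age rq->rt_avg below.
 */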

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */
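
/*
 * A sketch of the intended calling pattern (cf. hrtick_start_fair() in
 * fair.c): gate on hrtick_enabled() and hand hrtick_start() the remaining
 * slice in nanoseconds, so the task gets preempted right when it expires
 * rather than at the next regular tick.
 *
 *	if (hrtick_enabled(rq))
 *		hrtick_start(rq, delta_ns);
 */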

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
		return sd->smt_gain / sd->span_weight;

	return SCHED_CAPACITY_SCALE;
}
#endif
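
/*
 * Worked example for the generic arch_scale_cpu_capacity() above: on a 2-way
 * SMT domain (span_weight == 2) with smt_gain at 1178 (assuming the generic
 * topology default), each sibling reports 1178 / 2 == 589 -- a bit more than
 * half of SCHED_CAPACITY_SCALE (1024), reflecting that two siblings together
 * outperform one full core only modestly.
 */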

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif
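
/*
 * rq->rt_avg thus accumulates frequency-scaled RT/IRQ time, and
 * sched_avg_update() ages it so that old activity decays away. A sketch of
 * that decay, assuming the core.c implementation: rt_avg is halved once per
 * sched_avg_period() that has elapsed since rq->age_stamp.
 *
 *	s64 period = sched_avg_period();
 *
 *	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
 *		rq->age_stamp += period;
 *		rq->rt_avg /= 2;
 *	}
 */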

struct rq_flags {
	unsigned long flags;
	struct pin_cookie cookie;
};

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock);
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock);

static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
	__releases(rq->lock)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
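
/*
 * Typical usage: task_rq_lock() pins @p to its runqueue by taking both
 * p->pi_lock (irqs off) and rq->lock, so the rq it returns cannot change
 * under the caller. A sketch:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p cannot migrate; inspect/update p and rq state here ...
 *	task_rq_unlock(rq, p, &rf);
 */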

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
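
/*
 * Note the return value: non-zero means this_rq->lock was dropped and
 * retaken, so anything read under it must be revalidated. A sketch of the
 * usual pattern (cf. find_lock_lowest_rq() in rt.c):
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		... this_rq->lock was released; recheck that the task we
 *		    intend to move is still queued here and still movable ...
 *	}
 *	... migrate ...
 *	double_unlock_balance(this_rq, busiest);
 */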

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
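
/*
 * The address comparison gives all of these helpers a single global lock
 * order, which is what prevents ABBA deadlocks: two CPUs locking the same
 * pair in opposite argument order still acquire the lower-addressed lock
 * first.
 *
 *	CPU0: double_lock(&a, &b);	CPU1: double_lock(&b, &a);
 *
 * Both take min(&a, &b) first, so one of them simply waits instead of
 * deadlocking.
 */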

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}
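
/*
 * Since interrupt state is the caller's responsibility, the usual shape is:
 *
 *	local_irq_disable();
 *	double_rq_lock(rq1, rq2);
 *	... move state between the two runqueues ...
 *	double_rq_unlock(rq1, rq2);
 *	local_irq_enable();
 */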

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
	unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)

extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
	u64			hardirq_time;
	u64			softirq_time;
	u64			irq_start_time;
	struct u64_stats_sync	sync;
};

DECLARE_PER_CPU(struct irqtime, cpu_irqtime);

static inline u64 irq_time_read(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = __u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->softirq_time + irqtime->hardirq_time;
	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
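
/*
 * The retry loop above pairs with the writer side of the u64_stats seqcount
 * (the irqtime accounting path in cputime.c), which brackets its updates so
 * that 32-bit readers never see a torn 64-bit value. A sketch of that update,
 * assuming the cputime.c accounting path:
 *
 *	u64_stats_update_begin(&irqtime->sync);
 *	irqtime->hardirq_time += delta;
 *	u64_stats_update_end(&irqtime->sync);
 */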
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS,
 * though, because they may not be coming in if RT or deadline tasks are active
 * all the time (or there are RT and DL tasks only).
 *
 * As a workaround for that issue, this function is called by the RT and DL
 * sched classes to trigger extra cpufreq updates to prevent cpufreq from
 * stalling, but that really is a band-aid. Going forward it should be replaced
 * with solutions targeted more specifically at RT and DL tasks.
 */
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
	struct update_util_data *data;

	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
	if (data)
		data->func(data, rq_clock(rq), flags);
}

static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
{
	if (cpu_of(rq) == smp_processor_id())
		cpufreq_update_util(rq, flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
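
/*
 * A sketch of how a sched class feeds this hook (cf. the RT class, which
 * passes SCHED_CPUFREQ_RT so the governor knows an RT task is running and can
 * bump the frequency), while holding rq->lock:
 *
 *	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 *
 * CFS instead calls cpufreq_update_util() directly from the CPU that owns
 * @rq, with flags describing what changed (e.g. SCHED_CPUFREQ_IOWAIT).
 */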

#ifdef arch_scale_freq_capacity
#ifndef arch_scale_freq_invariant
#define arch_scale_freq_invariant()	(true)
#endif
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant()	(false)
#endif