/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>
#include <linux/irq_work.h>

#include "walt.h"

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
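
/*
 * Illustrative note (numbers are the usual defaults, not taken from this
 * file): with rt_period = 1 s and rt_runtime = 950 ms, the RT scheduling
 * classes may consume at most 950 ms of CPU time in each 1 s window, leaving
 * roughly 5% for fair-class tasks. Setting rt_runtime to RUNTIME_INF
 * disables throttling, which is why start_rt_bandwidth() bails out early in
 * that case.
 */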

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
259
Steven Rostedt4fd29172008-01-25 21:08:06 +0100260#ifdef CONFIG_SMP
Ingo Molnar84de4272008-01-25 21:08:15 +0100261
Peter Zijlstra8046d682015-06-11 14:46:40 +0200262static void pull_rt_task(struct rq *this_rq);
Peter Zijlstra38033c32014-01-23 20:32:21 +0100263
Peter Zijlstradc877342014-02-12 15:47:29 +0100264static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
265{
266 /* Try to pull RT tasks here if we lower this rq's prio */
267 return rq->rt.highest_prio.curr > prev->prio;
268}
269
Gregory Haskins637f5082008-01-25 21:08:18 +0100270static inline int rt_overloaded(struct rq *rq)
Steven Rostedt4fd29172008-01-25 21:08:06 +0100271{
Gregory Haskins637f5082008-01-25 21:08:18 +0100272 return atomic_read(&rq->rd->rto_count);
Steven Rostedt4fd29172008-01-25 21:08:06 +0100273}
Ingo Molnar84de4272008-01-25 21:08:15 +0100274
Steven Rostedt4fd29172008-01-25 21:08:06 +0100275static inline void rt_set_overload(struct rq *rq)
276{
Gregory Haskins1f11eb62008-06-04 15:04:05 -0400277 if (!rq->online)
278 return;
279
Rusty Russellc6c49272008-11-25 02:35:05 +1030280 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
Steven Rostedt4fd29172008-01-25 21:08:06 +0100281 /*
282 * Make sure the mask is visible before we set
283 * the overload count. That is checked to determine
284 * if we should look at the mask. It would be a shame
285 * if we looked at the mask, but the mask was not
286 * updated yet.
Peter Zijlstra7c3f2ab2013-10-15 12:35:07 +0200287 *
288 * Matched by the barrier in pull_rt_task().
Steven Rostedt4fd29172008-01-25 21:08:06 +0100289 */
Peter Zijlstra7c3f2ab2013-10-15 12:35:07 +0200290 smp_wmb();
Gregory Haskins637f5082008-01-25 21:08:18 +0100291 atomic_inc(&rq->rd->rto_count);
Steven Rostedt4fd29172008-01-25 21:08:06 +0100292}
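
/*
 * Illustrative note: the smp_wmb() above orders "set rto_mask bit" before
 * "increment rto_count". The puller checks rto_count first and, per the
 * comment, matches this with a read barrier in pull_rt_task(); without the
 * pairing it could observe a non-zero count but a stale mask and miss the
 * overloaded runqueue.
 */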

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
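
/*
 * Illustrative note: a runqueue counts as RT-overloaded only when it holds
 * more than one RT task (rt_nr_total > 1) and at least one of them may run
 * elsewhere (rt_nr_migratory). Two unpinned SCHED_FIFO tasks on one CPU set
 * the overload flag; a single RT task, or several tasks all pinned to this
 * CPU, do not.
 */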

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (tsk_nr_cpus_allowed(p) > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (tsk_nr_cpus_allowed(p) > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se)
		dequeue_top_rt_rq(rt_rq);
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
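
/*
 * Illustrative arithmetic (values invented for the example): on a 4-CPU root
 * domain (weight = 4) with rt_period = 1000 ms, a neighbour that has
 * rt_runtime = 950 ms and has used rt_time = 150 ms has 800 ms spare, so we
 * may take 800 / 4 = 200 ms from it, clamped so our own rt_runtime never
 * exceeds rt_period. The neighbour drops to 750 ms and we gain 200 ms for
 * the current period.
 */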

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_skip_update(rq, false);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}
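
/*
 * Illustrative note: each expiry of the period timer refills the per-rq
 * budgets. With runtime = 950 ms and overrun = 1, an rt_rq throttled with
 * rt_time = 1000 ms has its debt reduced by min(1000, 1 * 950) = 950 ms; the
 * remaining 50 ms is below runtime, so the throttle is cleared and the rt_rq
 * is put back via sched_rt_rq_enqueue().
 */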

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *rt_se;
	char buf[500];
	char *pos = buf;
	char *end = buf + sizeof(buf);
	int idx;

	pos += snprintf(pos, sizeof(buf),
		"sched: RT throttling activated for rt_rq %p (cpu %d)\n",
		rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));

	if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
		goto out;

	pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
	idx = sched_find_first_bit(array->bitmap);
	while (idx < MAX_RT_PRIO) {
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pos < end)
				pos += snprintf(pos, end - pos, "\t%s (%d)\n",
					p->comm, p->pid);
		}
		idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
	}
out:
#ifdef CONFIG_PANIC_ON_RT_THROTTLING
	/*
	 * Use pr_err() in the BUG() case since printk_sched() will
	 * not get flushed and deadlock is not a concern.
	 */
	pr_err("%s", buf);
	BUG();
#else
	printk_deferred("%s", buf);
#endif
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			static bool once = false;

			rt_rq->rt_throttled = 1;

			if (!once) {
				once = true;
				dump_throttled_rt_tasks(rt_rq);
			}
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
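
/*
 * Illustrative note (default numbers assumed): with runtime = 950 ms and
 * period = 1000 ms, an rt_rq whose tasks have run for rt_time = 960 ms in the
 * current window has exceeded its budget; unless RT_RUNTIME_SHARE can borrow
 * enough from other CPUs, the rt_rq is marked throttled and dequeued until
 * the period timer replenishes it.
 */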

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}
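
/*
 * Illustrative note: delta_exec is simply "rq task clock now" minus the
 * task's exec_start stamp. If exec_start was 500.0 ms and the clock now
 * reads 503.2 ms, the 3.2 ms is added to the task's sum_exec_runtime and to
 * rt_time of every rt_rq in its hierarchy, which is what eventually trips
 * sched_rt_runtime_exceeded().
 */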

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;
	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
		return;

	add_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 1;
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE.
 *
 * Assumes ENQUEUE/DEQUEUE flags match.
 */
static inline bool move_entity(unsigned int flags)
{
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
		return false;

	return true;
}
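
/*
 * Illustrative note (behaviour inferred from the flag names): DEQUEUE_SAVE
 * without DEQUEUE_MOVE is the "temporarily off the queue" case, e.g. an
 * attribute change that does not alter the effective priority; the entity is
 * taken off and put back but keeps its place in the per-priority list. Any
 * other combination relinks the entity, so move_entity() returns true.
 */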

static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
	list_del_init(&rt_se->run_list);

	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	rt_se->on_list = 0;
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
		if (rt_se->on_list)
			__delist_rt_entity(rt_se, array);
		return;
	}

	if (move_entity(flags)) {
		WARN_ON_ONCE(rt_se->on_list);
		if (flags & ENQUEUE_HEAD)
			list_add(&rt_se->run_list, queue);
		else
			list_add_tail(&rt_se->run_list, queue);

		__set_bit(rt_se_prio(rt_se), array->bitmap);
		rt_se->on_list = 1;
	}
	rt_se->on_rq = 1;

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	if (move_entity(flags)) {
		WARN_ON_ONCE(!rt_se->on_list);
		__delist_rt_entity(rt_se, array);
	}
	rt_se->on_rq = 0;

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	dequeue_top_rt_rq(rt_rq_of_se(back));

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}
}
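
/*
 * Illustrative note: with group scheduling the rt_se chain runs from the
 * task up through its parent group entities. The first loop records that
 * chain in the ->back pointers; the second walks it from the topmost group
 * entity down to the task, since dequeueing a lower entity first would
 * change the priority its parent was accounted with.
 */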
1331
Peter Zijlstraff77e462016-01-18 15:27:07 +01001332static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
Peter Zijlstraad2a3f12008-06-19 09:06:57 +02001333{
Kirill Tkhaif4ebcbc2014-03-15 02:15:00 +04001334 struct rq *rq = rq_of_rt_se(rt_se);
1335
Peter Zijlstraff77e462016-01-18 15:27:07 +01001336 dequeue_rt_stack(rt_se, flags);
Peter Zijlstraad2a3f12008-06-19 09:06:57 +02001337 for_each_sched_rt_entity(rt_se)
Peter Zijlstraff77e462016-01-18 15:27:07 +01001338 __enqueue_rt_entity(rt_se, flags);
Kirill Tkhaif4ebcbc2014-03-15 02:15:00 +04001339 enqueue_top_rt_rq(&rq->rt);
Peter Zijlstraad2a3f12008-06-19 09:06:57 +02001340}
1341
Peter Zijlstraff77e462016-01-18 15:27:07 +01001342static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
Peter Zijlstraad2a3f12008-06-19 09:06:57 +02001343{
Kirill Tkhaif4ebcbc2014-03-15 02:15:00 +04001344 struct rq *rq = rq_of_rt_se(rt_se);
1345
Peter Zijlstraff77e462016-01-18 15:27:07 +01001346 dequeue_rt_stack(rt_se, flags);
Peter Zijlstraad2a3f12008-06-19 09:06:57 +02001347
1348 for_each_sched_rt_entity(rt_se) {
1349 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1350
1351 if (rt_rq && rt_rq->rt_nr_running)
Peter Zijlstraff77e462016-01-18 15:27:07 +01001352 __enqueue_rt_entity(rt_se, flags);
Peter Zijlstra58d6c2d2008-04-19 19:45:00 +02001353 }
Kirill Tkhaif4ebcbc2014-03-15 02:15:00 +04001354 enqueue_top_rt_rq(&rq->rt);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001355}
1356
1357/*
1358 * Adding/removing a task to/from a priority array:
1359 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00001360static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001361enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001362{
1363 struct sched_rt_entity *rt_se = &p->rt;
1364
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001365 if (flags & ENQUEUE_WAKEUP)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001366 rt_se->timeout = 0;
1367
Peter Zijlstraff77e462016-01-18 15:27:07 +01001368 enqueue_rt_entity(rt_se, flags);
Srivatsa Vaddagiri26c21542016-05-31 09:08:38 -07001369 walt_inc_cumulative_runnable_avg(rq, p);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001370
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001371 if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
Gregory Haskins917b6272008-12-29 09:39:53 -05001372 enqueue_pushable_task(rq, p);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001373}
1374
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001375static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001376{
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001377 struct sched_rt_entity *rt_se = &p->rt;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001378
1379 update_curr_rt(rq);
Peter Zijlstraff77e462016-01-18 15:27:07 +01001380 dequeue_rt_entity(rt_se, flags);
Srivatsa Vaddagiri26c21542016-05-31 09:08:38 -07001381 walt_dec_cumulative_runnable_avg(rq, p);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001382
Gregory Haskins917b6272008-12-29 09:39:53 -05001383 dequeue_pushable_task(rq, p);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001384}
1385
1386/*
Richard Weinberger60686312011-11-12 18:07:57 +01001387 * Put a task at the head or the end of the run list without the overhead of
1388 * dequeue followed by enqueue.
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001389 */
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001390static void
1391requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001392{
Ingo Molnar1cdad712008-06-19 09:09:15 +02001393 if (on_rt_rq(rt_se)) {
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001394 struct rt_prio_array *array = &rt_rq->active;
1395 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1396
1397 if (head)
1398 list_move(&rt_se->run_list, queue);
1399 else
1400 list_move_tail(&rt_se->run_list, queue);
Ingo Molnar1cdad712008-06-19 09:09:15 +02001401 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001402}
1403
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001404static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001405{
1406 struct sched_rt_entity *rt_se = &p->rt;
1407 struct rt_rq *rt_rq;
1408
1409 for_each_sched_rt_entity(rt_se) {
1410 rt_rq = rt_rq_of_se(rt_se);
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001411 requeue_rt_entity(rt_rq, rt_se, head);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001412 }
1413}
1414
1415static void yield_task_rt(struct rq *rq)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001416{
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001417 requeue_task_rt(rq, rq->curr, 0);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001418}
1419
Gregory Haskinse7693a32008-01-25 21:08:09 +01001420#ifdef CONFIG_SMP
Gregory Haskins318e0892008-01-25 21:08:10 +01001421static int find_lowest_rq(struct task_struct *task);
1422
Peter Zijlstra0017d732010-03-24 18:34:10 +01001423static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01001424select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
Gregory Haskinse7693a32008-01-25 21:08:09 +01001425{
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001426 struct task_struct *curr;
1427 struct rq *rq;
Steven Rostedtc37495f2011-06-16 21:55:22 -04001428
1429 /* For anything but wake ups, just return the task_cpu */
1430 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1431 goto out;
1432
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001433 rq = cpu_rq(cpu);
1434
1435 rcu_read_lock();
Jason Low316c1608d2015-04-28 13:00:20 -07001436 curr = READ_ONCE(rq->curr); /* unlocked access */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001437
Gregory Haskins318e0892008-01-25 21:08:10 +01001438 /*
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001439 * If the current task on @p's runqueue is an RT task, then
Steven Rostedte1f47d82008-01-25 21:08:12 +01001440 * try to see if we can wake this RT task up on another
1441 * runqueue. Otherwise simply start this RT task
1442 * on its current runqueue.
1443 *
Steven Rostedt43fa5462010-09-20 22:40:03 -04001444 * We want to avoid overloading runqueues. If the woken
1445 * task is of higher priority, then it will stay on this CPU
1446 * and the lower prio task should be moved to another CPU.
1447 * Even though this will probably make the lower prio task
1448 * lose its cache, we do not want to bounce a higher prio task
1449 * around just because it gave up its CPU, perhaps for a
1450 * lock?
1451 *
1452 * For equal prio tasks, we just let the scheduler sort it out.
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001453 *
Gregory Haskins318e0892008-01-25 21:08:10 +01001454 * Otherwise, just let it ride on the affined RQ and the
1455 * post-schedule router will push the preempted task away
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001456 *
1457 * This test is optimistic, if we get it wrong the load-balancer
1458 * will have to sort it out.
Gregory Haskins318e0892008-01-25 21:08:10 +01001459 */
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001460 if (curr && unlikely(rt_task(curr)) &&
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001461 (tsk_nr_cpus_allowed(curr) < 2 ||
Shawn Bohrer6bfa6872013-10-04 14:24:53 -05001462 curr->prio <= p->prio)) {
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001463 int target = find_lowest_rq(p);
1464
Tim Chen80e3d872014-12-12 15:38:12 -08001465 /*
1466 * Don't bother moving it if the destination CPU is
1467 * not running a lower priority task.
1468 */
1469 if (target != -1 &&
1470 p->prio < cpu_rq(target)->rt.highest_prio.curr)
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001471 cpu = target;
1472 }
1473 rcu_read_unlock();
1474
Steven Rostedtc37495f2011-06-16 21:55:22 -04001475out:
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001476 return cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001477}
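
/*
 * Worked example for the wakeup path above (illustrative; a numerically
 * lower ->prio means a higher RT priority): if a prio-85 RT task is
 * current on @p's CPU and a prio-90 task wakes there, curr->prio <= p->prio
 * holds, so find_lowest_rq() is consulted and the waking task is redirected
 * only when the chosen CPU's highest queued priority is numerically above
 * 90, i.e. everything there is lower priority than p.  A pinned current
 * (nr_cpus_allowed < 2) triggers the same redirection regardless of
 * relative priority.
 */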
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001478
1479static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1480{
Wanpeng Li308a6232014-10-31 06:39:31 +08001481 /*
1482 * Current can't be migrated, useless to reschedule,
1483 * let's hope p can move out.
1484 */
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001485 if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
Wanpeng Li308a6232014-10-31 06:39:31 +08001486 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001487 return;
1488
Wanpeng Li308a6232014-10-31 06:39:31 +08001489 /*
1490 * p is migratable, so let's not schedule it and
1491 * see if it is pushed or pulled somewhere else.
1492 */
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001493 if (tsk_nr_cpus_allowed(p) != 1
Rusty Russell13b8bd02009-03-25 15:01:22 +10301494 && cpupri_find(&rq->rd->cpupri, p, NULL))
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001495 return;
1496
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001497 /*
1498 * There appear to be other CPUs that can accept
1499 * current and none to run 'p', so let's reschedule
1500 * to try and push current away:
1501 */
1502 requeue_task_rt(rq, p, 1);
Kirill Tkhai88751252014-06-29 00:03:57 +04001503 resched_curr(rq);
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001504}
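
/*
 * Summary of the equal-priority case (descriptive note): the migratable
 * task is the one that should move.  If current cannot go anywhere,
 * nothing is done and p is left for push/pull to relocate; if p itself
 * can run elsewhere, it is likewise left to the push/pull machinery.
 * Only when p is stuck on this CPU while current is movable is p requeued
 * at the head and a resched requested, so the push logic can move current
 * out of the way.
 */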
1505
Gregory Haskinse7693a32008-01-25 21:08:09 +01001506#endif /* CONFIG_SMP */
1507
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001508/*
1509 * Preempt the current task with a newly woken task if needed:
1510 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02001511static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001512{
Gregory Haskins45c01e82008-05-12 21:20:41 +02001513 if (p->prio < rq->curr->prio) {
Kirill Tkhai88751252014-06-29 00:03:57 +04001514 resched_curr(rq);
Gregory Haskins45c01e82008-05-12 21:20:41 +02001515 return;
1516 }
1517
1518#ifdef CONFIG_SMP
1519 /*
1520 * If:
1521 *
1522 * - the newly woken task is of equal priority to the current task
1523 * - the newly woken task is non-migratable while current is migratable
1524 * - current will be preempted on the next reschedule
1525 *
1526 * we should check to see if current can readily move to a different
1527 * cpu. If so, we will reschedule to allow the push logic to try
1528 * to move current somewhere else, making room for our non-migratable
1529 * task.
1530 */
Hillf Danton8dd0de82011-06-14 18:36:24 -04001531 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001532 check_preempt_equal_prio(rq, p);
Gregory Haskins45c01e82008-05-12 21:20:41 +02001533#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001534}
1535
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001536static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1537 struct rt_rq *rt_rq)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001538{
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001539 struct rt_prio_array *array = &rt_rq->active;
1540 struct sched_rt_entity *next = NULL;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001541 struct list_head *queue;
1542 int idx;
1543
1544 idx = sched_find_first_bit(array->bitmap);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001545 BUG_ON(idx >= MAX_RT_PRIO);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001546
1547 queue = array->queue + idx;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001548 next = list_entry(queue->next, struct sched_rt_entity, run_list);
Dmitry Adamushko326587b2008-01-25 21:08:34 +01001549
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001550 return next;
1551}
1552
Gregory Haskins917b6272008-12-29 09:39:53 -05001553static struct task_struct *_pick_next_task_rt(struct rq *rq)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001554{
1555 struct sched_rt_entity *rt_se;
1556 struct task_struct *p;
Peter Zijlstra606dba22012-02-11 06:05:00 +01001557 struct rt_rq *rt_rq = &rq->rt;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001558
1559 do {
1560 rt_se = pick_next_rt_entity(rq, rt_rq);
Dmitry Adamushko326587b2008-01-25 21:08:34 +01001561 BUG_ON(!rt_se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001562 rt_rq = group_rt_rq(rt_se);
1563 } while (rt_rq);
1564
1565 p = rt_task_of(rt_se);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001566 p->se.exec_start = rq_clock_task(rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05001567
1568 return p;
1569}
1570
Peter Zijlstra606dba22012-02-11 06:05:00 +01001571static struct task_struct *
Matt Fleming5a91d732016-09-21 14:38:10 +01001572pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
Gregory Haskins917b6272008-12-29 09:39:53 -05001573{
Peter Zijlstra606dba22012-02-11 06:05:00 +01001574 struct task_struct *p;
1575 struct rt_rq *rt_rq = &rq->rt;
1576
Peter Zijlstra37e117c2014-02-14 12:25:08 +01001577 if (need_pull_rt_task(rq, prev)) {
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001578 /*
1579 * This is OK, because current is on_cpu, which avoids it being
1580 * picked for load-balance and preemption/IRQs are still
1581 * disabled avoiding further scheduler activity on it and we're
1582 * being very careful to re-start the picking loop.
1583 */
Matt Fleming5a91d732016-09-21 14:38:10 +01001584 rq_unpin_lock(rq, rf);
Peter Zijlstra38033c32014-01-23 20:32:21 +01001585 pull_rt_task(rq);
Matt Fleming5a91d732016-09-21 14:38:10 +01001586 rq_repin_lock(rq, rf);
Peter Zijlstra37e117c2014-02-14 12:25:08 +01001587 /*
1588 * pull_rt_task() can drop (and re-acquire) rq->lock; this
Kirill Tkhaia1d9a322014-04-10 17:38:36 +04001589 * means a dl or stop task can slip in, in which case we need
1590 * to re-start task selection.
Peter Zijlstra37e117c2014-02-14 12:25:08 +01001591 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001592 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
Kirill Tkhaia1d9a322014-04-10 17:38:36 +04001593 rq->dl.dl_nr_running))
Peter Zijlstra37e117c2014-02-14 12:25:08 +01001594 return RETRY_TASK;
1595 }
Peter Zijlstra38033c32014-01-23 20:32:21 +01001596
Kirill Tkhai734ff2a2014-03-04 19:25:46 +04001597 /*
1598 * We may dequeue prev's rt_rq in put_prev_task().
1599 * So, we update time before rt_nr_running check.
1600 */
1601 if (prev->sched_class == &rt_sched_class)
1602 update_curr_rt(rq);
1603
Kirill Tkhaif4ebcbc2014-03-15 02:15:00 +04001604 if (!rt_rq->rt_queued)
Peter Zijlstra606dba22012-02-11 06:05:00 +01001605 return NULL;
1606
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01001607 put_prev_task(rq, prev);
Peter Zijlstra606dba22012-02-11 06:05:00 +01001608
1609 p = _pick_next_task_rt(rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05001610
1611 /* The running task is never eligible for pushing */
Kirill Tkhaif3f17682014-09-12 17:42:01 +04001612 dequeue_pushable_task(rq, p);
Gregory Haskins917b6272008-12-29 09:39:53 -05001613
Peter Zijlstrae3fca9e2015-06-11 14:46:37 +02001614 queue_push_tasks(rq);
Gregory Haskins3f029d32009-07-29 11:08:47 -04001615
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001616 return p;
1617}
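
/*
 * Note on RETRY_TASK above (descriptive): pull_rt_task() may drop and
 * re-take rq->lock, so a stop-class or deadline task can become runnable
 * in that window.  Returning RETRY_TASK makes the core scheduler restart
 * class selection from the top instead of trusting the RT pick made with
 * stale information.
 */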
1618
Ingo Molnar31ee5292007-08-09 11:16:49 +02001619static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001620{
Ingo Molnarf1e14ef2007-08-09 11:16:48 +02001621 update_curr_rt(rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05001622
1623 /*
1624 * The previous task needs to be made eligible for pushing
1625 * if it is still active
1626 */
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001627 if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
Gregory Haskins917b6272008-12-29 09:39:53 -05001628 enqueue_pushable_task(rq, p);
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001629}
1630
Peter Williams681f3e62007-10-24 18:23:51 +02001631#ifdef CONFIG_SMP
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001632
Steven Rostedte8fa1362008-01-25 21:08:05 +01001633/* Only try algorithms three times */
1634#define RT_MAX_TRIES 3
1635
Steven Rostedtf65eda42008-01-25 21:08:07 +01001636static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1637{
1638 if (!task_running(rq, p) &&
Kirill Tkhai60334ca2013-01-31 18:56:17 +04001639 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001640 return 1;
1641 return 0;
1642}
1643
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001644/*
1645 * Return the highest pushable rq's task, which is suitable to be executed
1646 * on the cpu, NULL otherwise
1647 */
1648static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001649{
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001650 struct plist_head *head = &rq->rt.pushable_tasks;
1651 struct task_struct *p;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001652
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001653 if (!has_pushable_tasks(rq))
1654 return NULL;
Peter Zijlstra3d074672010-03-10 17:07:24 +01001655
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001656 plist_for_each_entry(p, head, pushable_tasks) {
1657 if (pick_rt_task(rq, p, cpu))
1658 return p;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001659 }
1660
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001661 return NULL;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001662}
1663
Rusty Russell0e3900e2008-11-25 02:35:13 +10301664static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001665
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001666static int find_lowest_rq(struct task_struct *task)
1667{
1668 struct sched_domain *sd;
Christoph Lameter4ba29682014-08-26 19:12:21 -05001669 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001670 int this_cpu = smp_processor_id();
1671 int cpu = task_cpu(task);
1672
Steven Rostedt0da938c2011-06-14 18:36:25 -04001673 /* Make sure the mask is initialized first */
1674 if (unlikely(!lowest_mask))
1675 return -1;
1676
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001677 if (tsk_nr_cpus_allowed(task) == 1)
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001678 return -1; /* No other targets possible */
1679
1680 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
Gregory Haskins06f90db2008-01-25 21:08:13 +01001681 return -1; /* No targets found */
1682
1683 /*
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001684 * At this point we have built a mask of cpus representing the
1685 * lowest priority tasks in the system. Now we want to elect
1686 * the best one based on our affinity and topology.
1687 *
1688 * We prioritize the last cpu that the task executed on since
1689 * it is most likely cache-hot in that location.
1690 */
Rusty Russell96f874e2008-11-25 02:35:14 +10301691 if (cpumask_test_cpu(cpu, lowest_mask))
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001692 return cpu;
1693
1694 /*
1695 * Otherwise, we consult the sched_domains span maps to figure
1696 * out which cpu is logically closest to our hot cache data.
1697 */
Rusty Russelle2c88062009-11-03 14:53:15 +10301698 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1699 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001700
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001701 rcu_read_lock();
Rusty Russelle2c88062009-11-03 14:53:15 +10301702 for_each_domain(cpu, sd) {
1703 if (sd->flags & SD_WAKE_AFFINE) {
1704 int best_cpu;
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001705
Rusty Russelle2c88062009-11-03 14:53:15 +10301706 /*
1707 * "this_cpu" is cheaper to preempt than a
1708 * remote processor.
1709 */
1710 if (this_cpu != -1 &&
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001711 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1712 rcu_read_unlock();
Rusty Russelle2c88062009-11-03 14:53:15 +10301713 return this_cpu;
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001714 }
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001715
Rusty Russelle2c88062009-11-03 14:53:15 +10301716 best_cpu = cpumask_first_and(lowest_mask,
1717 sched_domain_span(sd));
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001718 if (best_cpu < nr_cpu_ids) {
1719 rcu_read_unlock();
Rusty Russelle2c88062009-11-03 14:53:15 +10301720 return best_cpu;
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001721 }
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001722 }
1723 }
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001724 rcu_read_unlock();
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001725
1726 /*
1727 * And finally, if there were no matches within the domains
1728 * just give the caller *something* to work with from the compatible
1729 * locations.
1730 */
Rusty Russelle2c88062009-11-03 14:53:15 +10301731 if (this_cpu != -1)
1732 return this_cpu;
1733
1734 cpu = cpumask_any(lowest_mask);
1735 if (cpu < nr_cpu_ids)
1736 return cpu;
1737 return -1;
Gregory Haskins07b40322008-01-25 21:08:10 +01001738}
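
/*
 * Minimal user-space sketch of the selection cascade above, illustrative
 * only and kept out of the build; example_pick_cpu() and its parameters
 * are hypothetical and the sched_domain walk is deliberately omitted.
 * It shows the fallback order: the task's last CPU for cache warmth,
 * then the local CPU, then any compatible CPU, else failure.
 */
#if 0
static int example_pick_cpu(const int *mask, int nmask, int task_cpu, int this_cpu)
{
	int i;

	for (i = 0; i < nmask; i++)
		if (mask[i] == task_cpu)
			return task_cpu;	/* likely cache-hot */

	for (i = 0; i < nmask; i++)
		if (mask[i] == this_cpu)
			return this_cpu;	/* cheaper to preempt locally */

	return nmask > 0 ? mask[0] : -1;	/* anything compatible, else fail */
}
#endif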
1739
Steven Rostedte8fa1362008-01-25 21:08:05 +01001740/* Will lock the rq it finds */
Ingo Molnar4df64c02008-01-25 21:08:15 +01001741static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001742{
1743 struct rq *lowest_rq = NULL;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001744 int tries;
Ingo Molnar4df64c02008-01-25 21:08:15 +01001745 int cpu;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001746
1747 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
Gregory Haskins07b40322008-01-25 21:08:10 +01001748 cpu = find_lowest_rq(task);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001749
Gregory Haskins2de0b462008-01-25 21:08:10 +01001750 if ((cpu == -1) || (cpu == rq->cpu))
Steven Rostedte8fa1362008-01-25 21:08:05 +01001751 break;
1752
Gregory Haskins07b40322008-01-25 21:08:10 +01001753 lowest_rq = cpu_rq(cpu);
1754
Tim Chen80e3d872014-12-12 15:38:12 -08001755 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1756 /*
1757 * Target rq has tasks of equal or higher priority,
1758 * retrying does not release any lock and is unlikely
1759 * to yield a different result.
1760 */
1761 lowest_rq = NULL;
1762 break;
1763 }
1764
Steven Rostedte8fa1362008-01-25 21:08:05 +01001765 /* if the prio of this runqueue changed, try again */
Gregory Haskins07b40322008-01-25 21:08:10 +01001766 if (double_lock_balance(rq, lowest_rq)) {
Steven Rostedte8fa1362008-01-25 21:08:05 +01001767 /*
1768 * We had to unlock the run queue. In
1769 * the meantime, the task could have
1770 * migrated already or had its affinity changed.
1771 * Also make sure that it wasn't scheduled on its rq.
1772 */
Gregory Haskins07b40322008-01-25 21:08:10 +01001773 if (unlikely(task_rq(task) != rq ||
Rusty Russell96f874e2008-11-25 02:35:14 +10301774 !cpumask_test_cpu(lowest_rq->cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02001775 tsk_cpus_allowed(task)) ||
Gregory Haskins07b40322008-01-25 21:08:10 +01001776 task_running(rq, task) ||
Xunlei Pang13b5ab02016-05-09 12:11:31 +08001777 !rt_task(task) ||
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001778 !task_on_rq_queued(task))) {
Ingo Molnar4df64c02008-01-25 21:08:15 +01001779
Peter Zijlstra7f1b4392012-05-17 21:19:46 +02001780 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001781 lowest_rq = NULL;
1782 break;
1783 }
1784 }
1785
1786 /* If this rq is still suitable use it. */
Gregory Haskinse864c492008-12-29 09:39:49 -05001787 if (lowest_rq->rt.highest_prio.curr > task->prio)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001788 break;
1789
1790 /* try again */
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001791 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001792 lowest_rq = NULL;
1793 }
1794
1795 return lowest_rq;
1796}
1797
Gregory Haskins917b6272008-12-29 09:39:53 -05001798static struct task_struct *pick_next_pushable_task(struct rq *rq)
1799{
1800 struct task_struct *p;
1801
1802 if (!has_pushable_tasks(rq))
1803 return NULL;
1804
1805 p = plist_first_entry(&rq->rt.pushable_tasks,
1806 struct task_struct, pushable_tasks);
1807
1808 BUG_ON(rq->cpu != task_cpu(p));
1809 BUG_ON(task_current(rq, p));
Thomas Gleixner50605ff2016-05-11 14:23:31 +02001810 BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
Gregory Haskins917b6272008-12-29 09:39:53 -05001811
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001812 BUG_ON(!task_on_rq_queued(p));
Gregory Haskins917b6272008-12-29 09:39:53 -05001813 BUG_ON(!rt_task(p));
1814
1815 return p;
1816}
1817
Steven Rostedte8fa1362008-01-25 21:08:05 +01001818/*
1819 * If the current CPU has more than one RT task, see if the non
1820 * running task can migrate over to a CPU that is running a task
1821 * of lesser priority.
1822 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001823static int push_rt_task(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001824{
1825 struct task_struct *next_task;
1826 struct rq *lowest_rq;
Hillf Danton311e8002011-06-16 21:55:20 -04001827 int ret = 0;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001828
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +01001829 if (!rq->rt.overloaded)
1830 return 0;
1831
Gregory Haskins917b6272008-12-29 09:39:53 -05001832 next_task = pick_next_pushable_task(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001833 if (!next_task)
1834 return 0;
1835
Peter Zijlstra49246272010-10-17 21:46:10 +02001836retry:
Gregory Haskins697f0a42008-01-25 21:08:09 +01001837 if (unlikely(next_task == rq->curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001838 WARN_ON(1);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001839 return 0;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001840 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01001841
1842 /*
1843 * It's possible that the next_task slipped in with
1844 * higher priority than current. If that's the case
1845 * just reschedule current.
1846 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001847 if (unlikely(next_task->prio < rq->curr->prio)) {
Kirill Tkhai88751252014-06-29 00:03:57 +04001848 resched_curr(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001849 return 0;
1850 }
1851
Gregory Haskins697f0a42008-01-25 21:08:09 +01001852 /* We might release rq lock */
Steven Rostedte8fa1362008-01-25 21:08:05 +01001853 get_task_struct(next_task);
1854
1855 /* find_lock_lowest_rq locks the rq if found */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001856 lowest_rq = find_lock_lowest_rq(next_task, rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001857 if (!lowest_rq) {
1858 struct task_struct *task;
1859 /*
Hillf Danton311e8002011-06-16 21:55:20 -04001860 * find_lock_lowest_rq releases rq->lock
Gregory Haskins15635132008-12-29 09:39:53 -05001861 * so it is possible that next_task has migrated.
1862 *
1863 * We need to make sure that the task is still on the same
1864 * run-queue and is also still the next task eligible for
1865 * pushing.
Steven Rostedte8fa1362008-01-25 21:08:05 +01001866 */
Gregory Haskins917b6272008-12-29 09:39:53 -05001867 task = pick_next_pushable_task(rq);
Gregory Haskins15635132008-12-29 09:39:53 -05001868 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1869 /*
Hillf Danton311e8002011-06-16 21:55:20 -04001870 * The task hasn't migrated, and is still the next
1871 * eligible task, but we failed to find a run-queue
1872 * to push it to. Do not retry in this case, since
1873 * other cpus will pull from us when ready.
Gregory Haskins15635132008-12-29 09:39:53 -05001874 */
Gregory Haskins15635132008-12-29 09:39:53 -05001875 goto out;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001876 }
Gregory Haskins917b6272008-12-29 09:39:53 -05001877
Gregory Haskins15635132008-12-29 09:39:53 -05001878 if (!task)
1879 /* No more tasks, just exit */
1880 goto out;
1881
Gregory Haskins917b6272008-12-29 09:39:53 -05001882 /*
Gregory Haskins15635132008-12-29 09:39:53 -05001883 * Something has shifted, try again.
Gregory Haskins917b6272008-12-29 09:39:53 -05001884 */
Gregory Haskins15635132008-12-29 09:39:53 -05001885 put_task_struct(next_task);
1886 next_task = task;
1887 goto retry;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001888 }
1889
Gregory Haskins697f0a42008-01-25 21:08:09 +01001890 deactivate_task(rq, next_task, 0);
Olav Haugan77ba2b92015-08-05 08:45:21 -07001891 next_task->on_rq = TASK_ON_RQ_MIGRATING;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001892 set_task_cpu(next_task, lowest_rq->cpu);
Olav Haugan77ba2b92015-08-05 08:45:21 -07001893 next_task->on_rq = TASK_ON_RQ_QUEUED;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001894 activate_task(lowest_rq, next_task, 0);
Hillf Danton311e8002011-06-16 21:55:20 -04001895 ret = 1;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001896
Kirill Tkhai88751252014-06-29 00:03:57 +04001897 resched_curr(lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001898
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001899 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001900
Steven Rostedte8fa1362008-01-25 21:08:05 +01001901out:
1902 put_task_struct(next_task);
1903
Hillf Danton311e8002011-06-16 21:55:20 -04001904 return ret;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001905}
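
/*
 * Push example (illustrative): an rq running a prio-80 task with a
 * prio-90 task queued behind it is overloaded.  push_rt_task() picks the
 * queued task via pick_next_pushable_task(), asks find_lock_lowest_rq()
 * for a CPU whose highest RT priority is numerically above 90 (i.e. only
 * lower priority work), migrates the task there and reschedules the
 * destination.  The retry label covers the window in which rq->lock was
 * dropped and the chosen task moved or changed underneath us.
 */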
1906
Steven Rostedte8fa1362008-01-25 21:08:05 +01001907static void push_rt_tasks(struct rq *rq)
1908{
1909 /* push_rt_task will return true if it moved an RT */
1910 while (push_rt_task(rq))
1911 ;
1912}
1913
Steven Rostedtb6366f02015-03-18 14:49:46 -04001914#ifdef HAVE_RT_PUSH_IPI
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001915
Steven Rostedtb6366f02015-03-18 14:49:46 -04001916/*
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001917 * When a high priority task schedules out from a CPU and a lower priority
1918 * task is scheduled in, a check is made to see if there are any RT tasks
1919 * on other CPUs that are waiting to run because a higher priority RT task
1920 * is currently running on its CPU. In this case, the CPU with multiple RT
1921 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1922 * up that may be able to run one of its non-running queued RT tasks.
Steven Rostedtb6366f02015-03-18 14:49:46 -04001923 *
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001924 * All CPUs with overloaded RT tasks need to be notified as there is currently
1925 * no way to know which of these CPUs have the highest priority task waiting
1926 * to run. Instead of trying to take a spinlock on each of these CPUs,
1927 * which has been shown to cause large latency when done on machines with many
1928 * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
1929 * RT tasks waiting to run.
1930 *
1931 * Just sending an IPI to each of the CPUs is also an issue, as on large
1932 * count CPU machines, this can cause an IPI storm on a CPU, especially
1933 * if it's the only CPU with multiple RT tasks queued, and a large number
1934 * of CPUs scheduling a lower priority task at the same time.
1935 *
1936 * Each root domain has its own irq work function that can iterate over
1937 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1938 * tasks must be checked if there's one or many CPUs that are lowering
1939 * their priority, there's a single irq work iterator that will try to
1940 * push off RT tasks that are waiting to run.
1941 *
1942 * When a CPU schedules a lower priority task, it will kick off the
1943 * irq work iterator that will jump to each CPU with overloaded RT tasks.
1944 * As it only takes the first CPU that schedules a lower priority task
1945 * to start the process, the rto_start variable is incremented and if
1946 * the atomic result is one, then that CPU will try to take the rto_lock.
1947 * This prevents high contention on the lock as the process handles all
1948 * CPUs scheduling lower priority tasks.
1949 *
1950 * All CPUs that are scheduling a lower priority task will increment the
1951 * rt_loop_next variable. This will make sure that the irq work iterator
1952 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1953 * priority task, even if the iterator is in the middle of a scan. Incrementing
1954 * the rt_loop_next will cause the iterator to perform another scan.
1955 *
Steven Rostedtb6366f02015-03-18 14:49:46 -04001956 */
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05001957static int rto_next_cpu(struct root_domain *rd)
Steven Rostedtb6366f02015-03-18 14:49:46 -04001958{
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001959 int next;
Steven Rostedtb6366f02015-03-18 14:49:46 -04001960 int cpu;
1961
Steven Rostedtb6366f02015-03-18 14:49:46 -04001962 /*
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001963 * When starting the IPI RT pushing, the rto_cpu is set to -1,
1964 * so rto_next_cpu() will simply return the first CPU found in
1965 * the rto_mask.
1966 *
1967 * If rto_next_cpu() is called when rto_cpu is a valid CPU, it
1968 * will return the next CPU found in the rto_mask.
1969 *
1970 * If there are no more CPUs left in the rto_mask, then a check is made
1971 * against rto_loop and rto_loop_next. rto_loop is only updated with
1972 * the rto_lock held, but any CPU may increment the rto_loop_next
1973 * without any locking.
Steven Rostedtb6366f02015-03-18 14:49:46 -04001974 */
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001975 for (;;) {
Steven Rostedtb6366f02015-03-18 14:49:46 -04001976
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001977 /* When rto_cpu is -1 this acts like cpumask_first() */
1978 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1979
1980 rd->rto_cpu = cpu;
1981
1982 if (cpu < nr_cpu_ids)
1983 return cpu;
1984
1985 rd->rto_cpu = -1;
1986
Steven Rostedtb6366f02015-03-18 14:49:46 -04001987 /*
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001988 * ACQUIRE ensures we see the @rto_mask changes
1989 * made prior to the @next value observed.
1990 *
1991 * Matches WMB in rt_set_overload().
Steven Rostedtb6366f02015-03-18 14:49:46 -04001992 */
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001993 next = atomic_read_acquire(&rd->rto_loop_next);
Steven Rostedtb6366f02015-03-18 14:49:46 -04001994
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04001995 if (rd->rto_loop == next)
1996 break;
1997
1998 rd->rto_loop = next;
1999 }
2000
2001 return -1;
Steven Rostedtb6366f02015-03-18 14:49:46 -04002002}
2003
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002004static inline bool rto_start_trylock(atomic_t *v)
Steven Rostedtb6366f02015-03-18 14:49:46 -04002005{
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002006 return !atomic_cmpxchg_acquire(v, 0, 1);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002007}
2008
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002009static inline void rto_start_unlock(atomic_t *v)
2010{
2011 atomic_set_release(v, 0);
2012}
Steven Rostedtb6366f02015-03-18 14:49:46 -04002013
2014static void tell_cpu_to_push(struct rq *rq)
2015{
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002016 int cpu = -1;
Steven Rostedtb6366f02015-03-18 14:49:46 -04002017
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002018 /* Keep the loop going if the IPI is currently active */
2019 atomic_inc(&rq->rd->rto_loop_next);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002020
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002021 /* Only one CPU can initiate a loop at a time */
2022 if (!rto_start_trylock(&rq->rd->rto_loop_start))
Steven Rostedtb6366f02015-03-18 14:49:46 -04002023 return;
2024
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002025 raw_spin_lock(&rq->rd->rto_lock);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002026
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002027 /*
2028 * The rto_cpu is updated under the lock; if it has a valid cpu
2029 * then the IPI is still running and will continue due to the
2030 * update to loop_next, and nothing needs to be done here.
2031 * Otherwise it is finishing up and an IPI needs to be sent.
2032 */
2033 if (rq->rd->rto_cpu < 0)
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05002034 cpu = rto_next_cpu(rq->rd);
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002035
2036 raw_spin_unlock(&rq->rd->rto_lock);
2037
2038 rto_start_unlock(&rq->rd->rto_loop_start);
2039
Steven Rostedt (VMware)a384e542018-01-23 20:45:38 -05002040 if (cpu >= 0) {
2041 /* Make sure the rd does not get freed while pushing */
2042 sched_get_rd(rq->rd);
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002043 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
Steven Rostedt (VMware)a384e542018-01-23 20:45:38 -05002044 }
Steven Rostedtb6366f02015-03-18 14:49:46 -04002045}
2046
2047/* Called from hardirq context */
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002048void rto_push_irq_work_func(struct irq_work *work)
Steven Rostedtb6366f02015-03-18 14:49:46 -04002049{
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05002050 struct root_domain *rd =
2051 container_of(work, struct root_domain, rto_push_work);
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002052 struct rq *rq;
Steven Rostedtb6366f02015-03-18 14:49:46 -04002053 int cpu;
2054
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002055 rq = this_rq();
Steven Rostedtb6366f02015-03-18 14:49:46 -04002056
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002057 /*
2058 * We do not need to grab the lock to check for has_pushable_tasks.
2059 * When it gets updated, a check is made if a push is possible.
2060 */
Steven Rostedtb6366f02015-03-18 14:49:46 -04002061 if (has_pushable_tasks(rq)) {
2062 raw_spin_lock(&rq->lock);
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002063 push_rt_tasks(rq);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002064 raw_spin_unlock(&rq->lock);
2065 }
2066
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05002067 raw_spin_lock(&rd->rto_lock);
Steven Rostedt (Red Hat)1c37ff72017-10-06 14:05:04 -04002068
Steven Rostedtb6366f02015-03-18 14:49:46 -04002069 /* Pass the IPI to the next rt overloaded queue */
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05002070 cpu = rto_next_cpu(rd);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002071
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05002072 raw_spin_unlock(&rd->rto_lock);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002073
Steven Rostedt (VMware)a384e542018-01-23 20:45:38 -05002074 if (cpu < 0) {
2075 sched_put_rd(rd);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002076 return;
Steven Rostedt (VMware)a384e542018-01-23 20:45:38 -05002077 }
Steven Rostedtb6366f02015-03-18 14:49:46 -04002078
Steven Rostedtb6366f02015-03-18 14:49:46 -04002079 /* Try the next RT overloaded CPU */
Steven Rostedt (VMware)1c679982018-01-23 20:45:37 -05002080 irq_work_queue_on(&rd->rto_push_work, cpu);
Steven Rostedtb6366f02015-03-18 14:49:46 -04002081}
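
/*
 * End-to-end flow of the IPI scheme (descriptive note): a CPU that is
 * about to run a lower priority task calls tell_cpu_to_push(), bumps
 * rto_loop_next and, if no pass is already in flight, queues
 * rto_push_work on the first CPU in rd->rto_mask.  That CPU pushes its
 * waiting RT tasks from hardirq context and forwards the same irq_work
 * to whatever rto_next_cpu() returns, so the work hops across the
 * overloaded CPUs one at a time until rto_next_cpu() finds the mask
 * exhausted with no newer rto_loop_next and returns -1.
 */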
2082#endif /* HAVE_RT_PUSH_IPI */
2083
Peter Zijlstra8046d682015-06-11 14:46:40 +02002084static void pull_rt_task(struct rq *this_rq)
Steven Rostedtf65eda42008-01-25 21:08:07 +01002085{
Peter Zijlstra8046d682015-06-11 14:46:40 +02002086 int this_cpu = this_rq->cpu, cpu;
2087 bool resched = false;
Gregory Haskinsa8728942008-12-29 09:39:49 -05002088 struct task_struct *p;
Steven Rostedtf65eda42008-01-25 21:08:07 +01002089 struct rq *src_rq;
Steven Rostedtd2668172017-12-02 13:04:54 -05002090 int rt_overload_count = rt_overloaded(this_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01002091
Steven Rostedtd2668172017-12-02 13:04:54 -05002092 if (likely(!rt_overload_count))
Peter Zijlstra8046d682015-06-11 14:46:40 +02002093 return;
Steven Rostedtf65eda42008-01-25 21:08:07 +01002094
Peter Zijlstra7c3f2ab2013-10-15 12:35:07 +02002095 /*
2096 * Match the barrier from rt_set_overload(); this guarantees that if we
2097 * see overloaded we must also see the rto_mask bit.
2098 */
2099 smp_rmb();
2100
Steven Rostedtd2668172017-12-02 13:04:54 -05002101 /* If we are the only overloaded CPU do nothing */
2102 if (rt_overload_count == 1 &&
2103 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2104 return;
2105
Steven Rostedtb6366f02015-03-18 14:49:46 -04002106#ifdef HAVE_RT_PUSH_IPI
2107 if (sched_feat(RT_PUSH_IPI)) {
2108 tell_cpu_to_push(this_rq);
Peter Zijlstra8046d682015-06-11 14:46:40 +02002109 return;
Steven Rostedtb6366f02015-03-18 14:49:46 -04002110 }
2111#endif
2112
Rusty Russellc6c49272008-11-25 02:35:05 +10302113 for_each_cpu(cpu, this_rq->rd->rto_mask) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01002114 if (this_cpu == cpu)
2115 continue;
2116
2117 src_rq = cpu_rq(cpu);
Gregory Haskins74ab8e42008-12-29 09:39:50 -05002118
2119 /*
2120 * Don't bother taking the src_rq->lock if the next highest
2121 * task is known to be lower-priority than our current task.
2122 * This may look racy, but if this value is about to go
2123 * logically higher, the src_rq will push this task away.
2124 * And if it's going logically lower, we do not care.
2125 */
2126 if (src_rq->rt.highest_prio.next >=
2127 this_rq->rt.highest_prio.curr)
2128 continue;
2129
Steven Rostedtf65eda42008-01-25 21:08:07 +01002130 /*
2131 * We can potentially drop this_rq's lock in
2132 * double_lock_balance, and another CPU could
Gregory Haskinsa8728942008-12-29 09:39:49 -05002133 * alter this_rq
Steven Rostedtf65eda42008-01-25 21:08:07 +01002134 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05002135 double_lock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01002136
2137 /*
Kirill Tkhaie23ee742013-06-07 15:37:43 -04002138 * We can only pull a task that is pushable
2139 * on its rq, and no others.
Steven Rostedtf65eda42008-01-25 21:08:07 +01002140 */
Kirill Tkhaie23ee742013-06-07 15:37:43 -04002141 p = pick_highest_pushable_task(src_rq, this_cpu);
Steven Rostedtf65eda42008-01-25 21:08:07 +01002142
2143 /*
2144 * Do we have an RT task that preempts
2145 * the to-be-scheduled task?
2146 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05002147 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01002148 WARN_ON(p == src_rq->curr);
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04002149 WARN_ON(!task_on_rq_queued(p));
Steven Rostedtf65eda42008-01-25 21:08:07 +01002150
2151 /*
2152 * There's a chance that p is higher in priority
2153 * than what's currently running on its cpu.
2154 * This is just that p is waking up and hasn't
2155 * had a chance to schedule. We only pull
2156 * p if it is lower in priority than the
Gregory Haskinsa8728942008-12-29 09:39:49 -05002157 * current task on the run queue
Steven Rostedtf65eda42008-01-25 21:08:07 +01002158 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05002159 if (p->prio < src_rq->curr->prio)
Mike Galbraith614ee1f2008-01-25 21:08:30 +01002160 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01002161
Peter Zijlstra8046d682015-06-11 14:46:40 +02002162 resched = true;
Steven Rostedtf65eda42008-01-25 21:08:07 +01002163
2164 deactivate_task(src_rq, p, 0);
Olav Haugan77ba2b92015-08-05 08:45:21 -07002165 p->on_rq = TASK_ON_RQ_MIGRATING;
Steven Rostedtf65eda42008-01-25 21:08:07 +01002166 set_task_cpu(p, this_cpu);
Olav Haugan77ba2b92015-08-05 08:45:21 -07002167 p->on_rq = TASK_ON_RQ_QUEUED;
Steven Rostedtf65eda42008-01-25 21:08:07 +01002168 activate_task(this_rq, p, 0);
2169 /*
2170 * We continue with the search, just in
2171 * case there's an even higher prio task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002172 * in another runqueue. (low likelihood
Steven Rostedtf65eda42008-01-25 21:08:07 +01002173 * but possible)
Steven Rostedtf65eda42008-01-25 21:08:07 +01002174 */
Steven Rostedtf65eda42008-01-25 21:08:07 +01002175 }
Peter Zijlstra49246272010-10-17 21:46:10 +02002176skip:
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02002177 double_unlock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01002178 }
2179
Peter Zijlstra8046d682015-06-11 14:46:40 +02002180 if (resched)
2181 resched_curr(this_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01002182}
2183
Gregory Haskins8ae121a2008-04-23 07:13:29 -04002184/*
2185 * If we are not running and we are not going to reschedule soon, we should
2186 * try to push tasks away now
2187 */
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002188static void task_woken_rt(struct rq *rq, struct task_struct *p)
Steven Rostedt4642daf2008-01-25 21:08:07 +01002189{
Steven Rostedt9a897c52008-01-25 21:08:22 +01002190 if (!task_running(rq, p) &&
Gregory Haskins8ae121a2008-04-23 07:13:29 -04002191 !test_tsk_need_resched(rq->curr) &&
Thomas Gleixner50605ff2016-05-11 14:23:31 +02002192 tsk_nr_cpus_allowed(p) > 1 &&
Juri Lelli1baca4c2013-11-07 14:43:38 +01002193 (dl_task(rq->curr) || rt_task(rq->curr)) &&
Thomas Gleixner50605ff2016-05-11 14:23:31 +02002194 (tsk_nr_cpus_allowed(rq->curr) < 2 ||
Shawn Bohrer3be209a2011-09-12 09:28:04 -05002195 rq->curr->prio <= p->prio))
Steven Rostedt4642daf2008-01-25 21:08:07 +01002196 push_rt_tasks(rq);
2197}
2198
Ingo Molnarbdd7c812008-01-25 21:08:18 +01002199/* Assumes rq->lock is held */
Gregory Haskins1f11eb62008-06-04 15:04:05 -04002200static void rq_online_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01002201{
2202 if (rq->rt.overloaded)
2203 rt_set_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02002204
Peter Zijlstra7def2be2008-06-05 14:49:58 +02002205 __enable_runtime(rq);
2206
Gregory Haskinse864c492008-12-29 09:39:49 -05002207 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01002208}
2209
2210/* Assumes rq->lock is held */
Gregory Haskins1f11eb62008-06-04 15:04:05 -04002211static void rq_offline_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01002212{
2213 if (rq->rt.overloaded)
2214 rt_clear_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02002215
Peter Zijlstra7def2be2008-06-05 14:49:58 +02002216 __disable_runtime(rq);
2217
Gregory Haskins6e0534f2008-05-12 21:21:01 +02002218 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01002219}
Steven Rostedtcb469842008-01-25 21:08:22 +01002220
2221/*
2222 * When switching from the rt queue, we bring ourselves to a position
2223 * where we might want to pull RT tasks from other runqueues.
2224 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002225static void switched_from_rt(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01002226{
2227 /*
2228 * If there are other RT tasks then we will reschedule
2229 * and the scheduling of the other RT tasks will handle
2230 * the balancing. But if we are the last RT task
2231 * we may need to handle the pulling of RT tasks
2232 * now.
2233 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04002234 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
Kirill Tkhai1158ddb2012-11-23 00:02:15 +04002235 return;
2236
Peter Zijlstrafd7a4be2015-06-11 14:46:41 +02002237 queue_pull_task(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01002238}
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10302239
Li Zefan11c785b2014-02-08 14:17:45 +08002240void __init init_sched_rt_class(void)
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10302241{
2242 unsigned int i;
2243
Peter Zijlstra029632f2011-10-25 10:00:11 +02002244 for_each_possible_cpu(i) {
Yinghai Lueaa95842009-06-06 14:51:36 -07002245 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
Mike Travis6ca09df2008-12-31 18:08:45 -08002246 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra029632f2011-10-25 10:00:11 +02002247 }
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10302248}
Steven Rostedte8fa1362008-01-25 21:08:05 +01002249#endif /* CONFIG_SMP */
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002250
Steven Rostedtcb469842008-01-25 21:08:22 +01002251/*
2252 * When switching a task to RT, we may overload the runqueue
2253 * with RT tasks. In this case we try to push them off to
2254 * other runqueues.
2255 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002256static void switched_to_rt(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01002257{
Steven Rostedtcb469842008-01-25 21:08:22 +01002258 /*
2259 * If we are already running, then there's nothing
2260 * that needs to be done. But if we are not running
2261 * we may need to preempt the current running task.
2262 * If that current running task is also an RT task
2263 * then see if we can move to another run queue.
2264 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04002265 if (task_on_rq_queued(p) && rq->curr != p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01002266#ifdef CONFIG_SMP
Thomas Gleixner50605ff2016-05-11 14:23:31 +02002267 if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
Peter Zijlstrafd7a4be2015-06-11 14:46:41 +02002268 queue_push_tasks(rq);
Sebastian Andrzej Siewior916c5cfe2017-01-24 15:40:06 +01002269#endif /* CONFIG_SMP */
Paul E. McKenneybac7bb12017-10-13 17:00:18 -07002270 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
Kirill Tkhai88751252014-06-29 00:03:57 +04002271 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01002272 }
2273}
2274
2275/*
2276 * Priority of the task has changed. This may cause
2277 * us to initiate a push or pull.
2278 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002279static void
2280prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01002281{
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04002282 if (!task_on_rq_queued(p))
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002283 return;
2284
2285 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01002286#ifdef CONFIG_SMP
2287 /*
2288 * If our priority decreases while running, we
2289 * may need to pull tasks to this runqueue.
2290 */
2291 if (oldprio < p->prio)
Peter Zijlstrafd7a4be2015-06-11 14:46:41 +02002292 queue_pull_task(rq);
2293
Steven Rostedtcb469842008-01-25 21:08:22 +01002294 /*
2295 * If there's a higher priority task waiting to run
Peter Zijlstrafd7a4be2015-06-11 14:46:41 +02002296 * then reschedule.
Steven Rostedtcb469842008-01-25 21:08:22 +01002297 */
Peter Zijlstrafd7a4be2015-06-11 14:46:41 +02002298 if (p->prio > rq->rt.highest_prio.curr)
Kirill Tkhai88751252014-06-29 00:03:57 +04002299 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01002300#else
2301 /* For UP simply resched on drop of prio */
2302 if (oldprio < p->prio)
Kirill Tkhai88751252014-06-29 00:03:57 +04002303 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01002304#endif /* CONFIG_SMP */
2305 } else {
2306 /*
2307 * This task is not running, but if its priority is
2308 * higher than the current running task's,
2309 * then reschedule.
2310 */
2311 if (p->prio < rq->curr->prio)
Kirill Tkhai88751252014-06-29 00:03:57 +04002312 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01002313 }
2314}
2315
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01002316static void watchdog(struct rq *rq, struct task_struct *p)
2317{
2318 unsigned long soft, hard;
2319
Jiri Slaby78d7d402010-03-05 13:42:54 -08002320 /* max may change after cur was read; this will be fixed next tick */
2321 soft = task_rlimit(p, RLIMIT_RTTIME);
2322 hard = task_rlimit_max(p, RLIMIT_RTTIME);
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01002323
2324 if (soft != RLIM_INFINITY) {
2325 unsigned long next;
2326
Ying Xue57d2aa02012-07-17 15:03:43 +08002327 if (p->rt.watchdog_stamp != jiffies) {
2328 p->rt.timeout++;
2329 p->rt.watchdog_stamp = jiffies;
2330 }
2331
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01002332 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
Peter Zijlstra5a52dd52008-01-25 21:08:32 +01002333 if (p->rt.timeout > next)
Frank Mayharf06febc2008-09-12 09:54:39 -07002334 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01002335 }
2336}
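
/*
 * The limit enforced above is RLIMIT_RTTIME: CPU time, in microseconds,
 * that an RT task may consume without making a blocking system call.
 * A user-space sketch of arming it (illustrative only, kept out of the
 * build; the values chosen are arbitrary examples):
 */
#if 0
#include <sys/resource.h>

static int limit_rt_runtime(void)
{
	struct rlimit rl = {
		.rlim_cur = 500000,	/* SIGXCPU after ~0.5s without blocking */
		.rlim_max = 1000000,	/* hard limit, exceeded -> SIGKILL */
	};

	return setrlimit(RLIMIT_RTTIME, &rl);
}
#endif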
Steven Rostedtcb469842008-01-25 21:08:22 +01002337
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002338static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002339{
Colin Cross454c7992012-05-16 21:34:23 -07002340 struct sched_rt_entity *rt_se = &p->rt;
2341
Peter Zijlstra67e2be02007-12-20 15:01:17 +01002342 update_curr_rt(rq);
2343
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01002344 watchdog(rq, p);
2345
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002346 /*
2347 * RR tasks need a special form of timeslice management.
2348 * FIFO tasks have no timeslices.
2349 */
2350 if (p->policy != SCHED_RR)
2351 return;
2352
Peter Zijlstrafa717062008-01-25 21:08:27 +01002353 if (--p->rt.time_slice)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002354 return;
2355
Clark Williamsce0dbbb2013-02-07 09:47:04 -06002356 p->rt.time_slice = sched_rr_timeslice;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002357
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02002358 /*
Li Bine9aa39b2013-10-21 20:15:43 +08002359 * Requeue to the end of the queue if we (and all of our ancestors) are not
2360 * the only element on the queue.
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02002361 */
Colin Cross454c7992012-05-16 21:34:23 -07002362 for_each_sched_rt_entity(rt_se) {
2363 if (rt_se->run_list.prev != rt_se->run_list.next) {
2364 requeue_task_rt(rq, p, 0);
Kirill Tkhai8aa6f0e2014-09-22 22:36:43 +04002365 resched_curr(rq);
Colin Cross454c7992012-05-16 21:34:23 -07002366 return;
2367 }
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02002368 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002369}
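
/*
 * Timeslice behaviour above in one example (descriptive note): with the
 * default sched_rr_timeslice (RR_TIMESLICE, 100 ms worth of ticks) a
 * SCHED_RR task is requeued behind an equal-priority peer roughly every
 * 100 ms of runtime, but only if some queue in its hierarchy actually
 * holds another element; a lone RR task simply has its slice refilled.
 * SCHED_FIFO tasks return early and are never requeued on tick.
 */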
2370
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002371static void set_curr_task_rt(struct rq *rq)
2372{
2373 struct task_struct *p = rq->curr;
2374
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002375 p->se.exec_start = rq_clock_task(rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05002376
2377 /* The running task is never eligible for pushing */
2378 dequeue_pushable_task(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002379}
2380
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07002381static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00002382{
2383 /*
2384 * Time slice is 0 for SCHED_FIFO tasks
2385 */
2386 if (task->policy == SCHED_RR)
Clark Williamsce0dbbb2013-02-07 09:47:04 -06002387 return sched_rr_timeslice;
Peter Williams0d721ce2009-09-21 01:31:53 +00002388 else
2389 return 0;
2390}
2391
Peter Zijlstra029632f2011-10-25 10:00:11 +02002392const struct sched_class rt_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02002393 .next = &fair_sched_class,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002394 .enqueue_task = enqueue_task_rt,
2395 .dequeue_task = dequeue_task_rt,
2396 .yield_task = yield_task_rt,
2397
2398 .check_preempt_curr = check_preempt_curr_rt,
2399
2400 .pick_next_task = pick_next_task_rt,
2401 .put_prev_task = put_prev_task_rt,
2402
Peter Williams681f3e62007-10-24 18:23:51 +02002403#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08002404 .select_task_rq = select_task_rq_rt,
2405
Peter Zijlstra6c370672015-05-15 17:43:36 +02002406 .set_cpus_allowed = set_cpus_allowed_common,
Gregory Haskins1f11eb62008-06-04 15:04:05 -04002407 .rq_online = rq_online_rt,
2408 .rq_offline = rq_offline_rt,
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002409 .task_woken = task_woken_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01002410 .switched_from = switched_from_rt,
Peter Williams681f3e62007-10-24 18:23:51 +02002411#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002412
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002413 .set_curr_task = set_curr_task_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002414 .task_tick = task_tick_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01002415
Peter Williams0d721ce2009-09-21 01:31:53 +00002416 .get_rr_interval = get_rr_interval_rt,
2417
Steven Rostedtcb469842008-01-25 21:08:22 +01002418 .prio_changed = prio_changed_rt,
2419 .switched_to = switched_to_rt,
Stanislaw Gruszka6e998912014-11-12 16:58:44 +01002420
2421 .update_curr = update_curr_rt,
Pavankumar Kondeti1c847af2019-09-04 10:08:32 +05302422#ifdef CONFIG_SCHED_WALT
2423 .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
2424#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02002425};
Peter Zijlstraada18de2008-06-19 14:22:24 +02002426
2427#ifdef CONFIG_SCHED_DEBUG
2428extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2429
Peter Zijlstra029632f2011-10-25 10:00:11 +02002430void print_rt_stats(struct seq_file *m, int cpu)
Peter Zijlstraada18de2008-06-19 14:22:24 +02002431{
Cheng Xuec514c42011-05-14 14:20:02 +08002432 rt_rq_iter_t iter;
Peter Zijlstraada18de2008-06-19 14:22:24 +02002433 struct rt_rq *rt_rq;
2434
2435 rcu_read_lock();
Cheng Xuec514c42011-05-14 14:20:02 +08002436 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
Peter Zijlstraada18de2008-06-19 14:22:24 +02002437 print_rt_rq(m, cpu, rt_rq);
2438 rcu_read_unlock();
2439}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302440#endif /* CONFIG_SCHED_DEBUG */