#include "sched.h"
#include "walt.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
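
/*
 * Note: work reaches the stop task through the cpu_stop machinery,
 * e.g. stop_one_cpu() or stop_machine() in kernel/stop_machine.c; this
 * class is never entered via a userspace scheduling policy.
 */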

#ifdef CONFIG_SMP
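/*
 * The stop task is pinned to its CPU, so placement is trivial: it stays
 * wherever it already is.
 */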
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

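/*
 * Return the per-CPU stop task if it is queued; otherwise fall through to
 * the next class (deadline). exec_start is reset so that
 * put_prev_task_stop() accounts only this activation's runtime.
 */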
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *stop = rq->stop;

	if (!stop || !task_on_rq_queued(stop))
		return NULL;

	put_prev_task(rq, prev);

	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}

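/*
 * The stop task never sits on a runqueue list, so enqueue/dequeue only
 * keep nr_running and the WALT (Window Assisted Load Tracking) runnable
 * statistics in sync.
 */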
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
	walt_inc_cumulative_runnable_avg(rq, p);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
	walt_dec_cumulative_runnable_avg(rq, p);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

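/*
 * Account the CPU time consumed since exec_start was last set, guarding
 * against clock warps, then restart the accounting window.
 */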
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}

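/*
 * Stop tasks have no timeslice to manage, so the tick has nothing to do.
 */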
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

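/*
 * The stop task became rq->curr without passing through
 * pick_next_task_stop(); re-arm exec_start so runtime accounting starts
 * from now.
 */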
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}

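/*
 * A task can never switch into the stop class through the normal policy
 * paths, nor can the stop task's priority change, so both hooks are bugs
 * if ever reached.
 */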
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

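/*
 * sched_rr_get_interval() reports a zero timeslice for classes that do
 * not round-robin.
 */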
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

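/*
 * All runtime accounting for the stop task happens in
 * put_prev_task_stop(), so there is nothing left for update_curr to do.
 */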
static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};