#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

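/*
 * The stop task is not kept on any run-queue data structure; it is simply
 * pointed to by rq->stop. Hand it back when it is queued (stop->on_rq),
 * after putting the previously running task; returning NULL lets the core
 * scheduler fall through to the lower classes.
 */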
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *stop = rq->stop;

	if (!stop || !stop->on_rq)
		return NULL;

	put_prev_task(rq, prev);

	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}

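/*
 * There is at most one stop task per CPU, tracked via rq->stop, so
 * enqueue/dequeue have no queue to maintain; they only keep
 * rq->nr_running in sync.
 */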
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	inc_nr_running(rq);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	dec_nr_running(rq);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

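/*
 * Account the CPU time the stop task consumed while it was current;
 * essentially a hand-rolled equivalent of the other classes' update_curr():
 * clamp a (theoretically impossible) negative clock delta, fold the delta
 * into sum_exec_runtime, schedstats, group and cpuacct accounting, and
 * restart the exec_start timestamp.
 */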
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
			max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);
}

static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

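/*
 * Called when the stop task becomes rq->curr without going through
 * pick_next_task_stop(); restart exec_start so the next
 * put_prev_task_stop() only accounts time spent running from this point.
 */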
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

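/* The stop task has no timeslice, so report a zero round-robin interval. */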
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
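/*
 * Chaining ->next to the deadline class puts this class at the top of the
 * class pick order (stop -> dl -> rt -> fair -> idle), which is what makes
 * the stop task preempt everything else.
 */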
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
};