/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by the log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 2 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;

unsigned int sysctl_sched_runtime_limit __read_mostly;

extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

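/*
 * Wraparound-safe max of two vruntimes: also treat @vruntime as the
 * later one when @min_vruntime sits near the top of the u64 range
 * (above 2^61) while @vruntime has already wrapped around (below 2^50).
 */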
static inline u64
max_vruntime(u64 min_vruntime, u64 vruntime)
{
	if ((vruntime > min_vruntime) ||
	    (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline void
set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
{
	cfs_rq->rb_leftmost = leftmost;
}

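/*
 * Timeline ordering key: vruntime relative to the runqueue's
 * min_vruntime, which keeps keys within a small signed range.
 */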
static inline s64
entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		set_leftmost(cfs_rq, &se->run_node);

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		set_leftmost(cfs_rq, rb_next(&se->run_node));

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

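/* The leftmost (smallest-key) entity is the next one to run. */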
static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *se = NULL;
	struct rb_node *parent;

	while (*link) {
		parent = *link;
		se = rb_entry(parent, struct sched_entity, run_node);
		link = &parent->rb_right;
	}

	return se;
}

/**************************************************************
 * Scheduling class statistics methods:
 */

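/*
 * The scheduling period: the latency target, stretched once more than
 * latency/min_granularity tasks are runnable, so that each of them can
 * still get at least the minimal granularity within one period.
 */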
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency =
		sysctl_sched_latency / sysctl_sched_min_granularity;

	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}

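/*
 * The slice of the period this entity should run, proportional to its
 * share of the runqueue load: slice = period * se->load / cfs_rq->load.
 */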
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 period = __sched_period(cfs_rq->nr_running);

	period *= se->load.weight;
	do_div(period, cfs_rq->load.weight);

	return period;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;
	u64 next_vruntime, min_vruntime;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	/*
	 * maintain cfs_rq->min_vruntime to be a monotonically increasing
	 * value tracking the leftmost vruntime in the tree.
	 */
	if (first_fair(cfs_rq)) {
		next_vruntime = __pick_next_entity(cfs_rq)->vruntime;

		/* min_vruntime() := !max_vruntime() */
		min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
		if (min_vruntime == next_vruntime)
			min_vruntime = curr->vruntime;
		else
			min_vruntime = next_vruntime;
	} else
		min_vruntime = curr->vruntime;

	cfs_rq->min_vruntime =
		max_vruntime(cfs_rq->min_vruntime, min_vruntime);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

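/*
 * Scale @delta by the entity's load weight; nice-0 tasks pass through
 * unchanged.
 */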
static inline unsigned long
calc_weighted(unsigned long delta, struct sched_entity *se)
{
	unsigned long weight = se->load.weight;

	if (unlikely(weight != NICE_0_LOAD))
		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
	else
		return delta;
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_curr(cfs_rq);
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

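/*
 * Account an entity entering/leaving the runqueue: adjust the load sum,
 * nr_running and the entity's on_rq flag.
 */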
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

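/*
 * Record how long the entity slept or blocked before this wakeup
 * (schedstats only).
 */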
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			struct task_struct *tsk = task_of(se);

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
	}
#endif
}

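/*
 * Choose the vruntime a newly placed entity starts from: based on
 * min_vruntime (or a tree/approximated average), debited by a full
 * slice for fresh forks under START_DEBIT and credited by up to one
 * latency period for waking sleepers under NEW_FAIR_SLEEPERS.
 */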
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 min_runtime, latency;

	min_runtime = cfs_rq->min_vruntime;

	if (sched_feat(USE_TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			min_runtime = __pick_next_entity(cfs_rq)->vruntime;
			min_runtime += last->vruntime;
			min_runtime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG))
		min_runtime += sysctl_sched_latency/2;

	if (initial && sched_feat(START_DEBIT))
		min_runtime += sched_slice(cfs_rq, se);

	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
		latency = sysctl_sched_latency;
		if (min_runtime > latency)
			min_runtime -= latency;
		else
			min_runtime = 0;
	}

	se->vruntime = max(se->vruntime, min_runtime);
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update the fair clock.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	update_stats_dequeue(cfs_rq, se);
#ifdef CONFIG_SCHEDSTATS
	if (sleep) {
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
	}
#endif
	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task if it has exhausted its ideal runtime
 * (its fair slice of the scheduling period):
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}

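/*
 * Make @se the running entity: close its wait period and open its run
 * period.
 */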
static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Any task has to be enqueued before it gets to execute on
	 * a CPU. So account for the time it spent waiting on the
	 * runqueue.
	 */
	update_stats_wait_end(cfs_rq, se);
	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2 * se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	/* 'current' is not kept within the tree. */
	if (se)
		__dequeue_entity(cfs_rq, se);

	set_next_entity(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	update_stats_curr_end(cfs_rq, prev);

	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

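/*
 * Periodic tick work for one entity: refresh its runtime statistics and
 * check whether it has outrun its fair slice.
 */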
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (cfs_rq->nr_running > 1)
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	if (curr->se.cfs_rq == p->se.cfs_rq)
		return 1;

	return 0;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	return 1;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
	}
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
	}
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *rightmost, *se = &rq->curr->se;
	struct rb_node *parent;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield)) {
		__update_rq_clock(rq);
		/*
		 * Dequeue and enqueue the task to update its
		 * position within the tree:
		 */
		dequeue_entity(cfs_rq, se, 0);
		enqueue_entity(cfs_rq, se, 0);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	do {
		parent = *link;
		link = &parent->rb_right;
	} while (*link);

	rightmost = rb_entry(parent, struct sched_entity, run_node);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost == se))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 */
	se->vruntime = rightmost->vruntime + 1;

	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);
	/*
	 * Relink the task to the rightmost position:
	 */
	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}
	if (is_same_group(curr, p)) {
		s64 delta = curr->se.vruntime - p->se.vruntime;

		if (delta > (s64)sysctl_sched_wakeup_granularity)
			resched_task(curr);
	}
}

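/*
 * Pick the task to run next: walk down the group hierarchy, taking the
 * leftmost entity at each level.
 */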
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running)
		return MAX_PRIO;

	curr = __pick_next_entity(cfs_rq);
	p = task_of(curr);

	return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_nr_move, unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	unsigned long load_moved, total_nr_moved = 0, nr_moved;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
		/* pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
				max_nr_move, maxload, sd, idle, all_pinned,
				&load_moved, this_best_prio, &cfs_rq_iterator);

		total_nr_moved += nr_moved;
		max_nr_move -= nr_moved;
		rem_load_move -= load_moved;

		if (max_nr_move <= 0 || rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first &&
			curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
	}

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
	resched_task(rq->curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}
#else
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	cfs_rq->curr = se;
}
#endif

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

	.load_balance		= load_balance_fair,

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
}
#endif