blob: 480adeb63f8ffb7942ddad9daca114a1c6bad27a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
Ingo Molnarc31f2e82007-07-09 18:52:01 +020019 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
Ingo Molnarb9131762008-01-25 21:08:19 +010025 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020033#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/highmem.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080037#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/completion.h>
39#include <linux/kernel_stat.h>
Ingo Molnar9a11b49a2006-07-03 00:24:33 -070040#include <linux/debug_locks.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020041#include <linux/perf_event.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080045#include <linux/freezer.h>
akpm@osdl.org198e2f12006-01-12 01:05:30 -080046#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <linux/blkdev.h>
48#include <linux/delay.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070049#include <linux/pid_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
Alexey Dobriyanb5aadf72008-10-06 13:23:43 +040057#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <linux/seq_file.h>
Tejun Heo969c7922010-05-06 18:49:21 +020059#include <linux/stop_machine.h>
Nick Piggine692ab52007-07-26 13:40:43 +020060#include <linux/sysctl.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070061#include <linux/syscalls.h>
62#include <linux/times.h>
Jay Lan8f0ab512006-09-30 23:28:59 -070063#include <linux/tsacct_kern.h>
bibo maoc6fd91f2006-03-26 01:38:20 -080064#include <linux/kprobes.h>
Shailabh Nagar0ff92242006-07-14 00:24:37 -070065#include <linux/delayacct.h>
Ingo Molnardff06c12007-07-09 18:52:00 +020066#include <linux/unistd.h>
Jens Axboef5ff8422007-09-21 09:19:54 +020067#include <linux/pagemap.h>
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +010068#include <linux/hrtimer.h>
Reynes Philippe30914a52008-03-17 16:19:05 -070069#include <linux/tick.h>
Peter Zijlstraf00b45c2008-04-19 19:45:00 +020070#include <linux/debugfs.h>
71#include <linux/ctype.h>
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +020072#include <linux/ftrace.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090073#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Eric Dumazet5517d862007-05-08 00:32:57 -070075#include <asm/tlb.h>
Satyam Sharma838225b2007-10-24 18:23:50 +020076#include <asm/irq_regs.h>
Gerald Schaefer335d7af2010-11-22 15:47:36 +010077#include <asm/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
Gregory Haskins6e0534f2008-05-12 21:21:01 +020079#include "sched_cpupri.h"
Tejun Heo21aa9af2010-06-08 21:40:37 +020080#include "workqueue_sched.h"
Mike Galbraith5091faa2010-11-30 14:18:03 +010081#include "sched_autogroup.h"
Gregory Haskins6e0534f2008-05-12 21:21:01 +020082
Steven Rostedta8d154b2009-04-10 09:36:00 -040083#define CREATE_TRACE_POINTS
Steven Rostedtad8d75f2009-04-14 19:39:12 -040084#include <trace/events/sched.h>
Steven Rostedta8d154b2009-04-10 09:36:00 -040085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086/*
87 * Convert user-nice values [ -20 ... 0 ... 19 ]
88 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
89 * and back.
90 */
91#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
92#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
93#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
94
95/*
96 * 'User priority' is the nice value converted to something we
97 * can work with better when scaling various scheduler parameters,
98 * it's a [ 0 ... 39 ] range.
99 */
100#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
101#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
102#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
103
104/*
Ingo Molnard7876a02008-01-25 21:08:19 +0100105 * Helpers for converting nanosecond timing to jiffy resolution
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 */
Eric Dumazetd6322fa2007-11-09 22:39:38 +0100107#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200109#define NICE_0_LOAD SCHED_LOAD_SCALE
110#define NICE_0_SHIFT SCHED_LOAD_SHIFT
111
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112/*
113 * These are the 'tuning knobs' of the scheduler:
114 *
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +0200115 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116 * Timeslices get refilled after they expire.
117 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118#define DEF_TIMESLICE (100 * HZ / 1000)
Peter Williams2dd73a42006-06-27 02:54:34 -0700119
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200120/*
121 * single value that denotes runtime == period, ie unlimited time.
122 */
123#define RUNTIME_INF ((u64)~0ULL)
124
Ingo Molnare05606d2007-07-09 18:51:59 +0200125static inline int rt_policy(int policy)
126{
Roel Kluin3f33a7c2008-05-13 23:44:11 +0200127 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
Ingo Molnare05606d2007-07-09 18:51:59 +0200128 return 1;
129 return 0;
130}
131
132static inline int task_has_rt_policy(struct task_struct *p)
133{
134 return rt_policy(p->policy);
135}
136
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137/*
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200138 * This is the priority-queue data structure of the RT scheduling class:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 */
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200140struct rt_prio_array {
141 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
142 struct list_head queue[MAX_RT_PRIO];
143};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200145struct rt_bandwidth {
Ingo Molnarea736ed2008-03-25 13:51:45 +0100146 /* nests inside the rq lock: */
Thomas Gleixner0986b112009-11-17 15:32:06 +0100147 raw_spinlock_t rt_runtime_lock;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100148 ktime_t rt_period;
149 u64 rt_runtime;
150 struct hrtimer rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200151};
152
153static struct rt_bandwidth def_rt_bandwidth;
154
155static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
156
157static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
158{
159 struct rt_bandwidth *rt_b =
160 container_of(timer, struct rt_bandwidth, rt_period_timer);
161 ktime_t now;
162 int overrun;
163 int idle = 0;
164
165 for (;;) {
166 now = hrtimer_cb_get_time(timer);
167 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
168
169 if (!overrun)
170 break;
171
172 idle = do_sched_rt_period_timer(rt_b, overrun);
173 }
174
175 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
176}
177
178static
179void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
180{
181 rt_b->rt_period = ns_to_ktime(period);
182 rt_b->rt_runtime = runtime;
183
Thomas Gleixner0986b112009-11-17 15:32:06 +0100184 raw_spin_lock_init(&rt_b->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200185
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200186 hrtimer_init(&rt_b->rt_period_timer,
187 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
188 rt_b->rt_period_timer.function = sched_rt_period_timer;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200189}
190
Krzysztof Heltc8bfff62008-09-05 23:46:19 +0200191static inline int rt_bandwidth_enabled(void)
192{
193 return sysctl_sched_rt_runtime >= 0;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200194}
195
196static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
197{
198 ktime_t now;
199
Hiroshi Shimamotocac64d02009-02-25 09:59:26 -0800200 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200201 return;
202
203 if (hrtimer_active(&rt_b->rt_period_timer))
204 return;
205
Thomas Gleixner0986b112009-11-17 15:32:06 +0100206 raw_spin_lock(&rt_b->rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200207 for (;;) {
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100208 unsigned long delta;
209 ktime_t soft, hard;
210
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200211 if (hrtimer_active(&rt_b->rt_period_timer))
212 break;
213
214 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
215 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +0100216
217 soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
218 hard = hrtimer_get_expires(&rt_b->rt_period_timer);
219 delta = ktime_to_ns(ktime_sub(hard, soft));
220 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +0530221 HRTIMER_MODE_ABS_PINNED, 0);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200222 }
Thomas Gleixner0986b112009-11-17 15:32:06 +0100223 raw_spin_unlock(&rt_b->rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200224}
225
226#ifdef CONFIG_RT_GROUP_SCHED
227static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
228{
229 hrtimer_cancel(&rt_b->rt_period_timer);
230}
231#endif
232
Heiko Carstens712555e2008-04-28 11:33:07 +0200233/*
234 * sched_domains_mutex serializes calls to arch_init_sched_domains,
235 * detach_destroy_domains and partition_sched_domains.
236 */
237static DEFINE_MUTEX(sched_domains_mutex);
238
Dhaval Giani7c941432010-01-20 13:26:18 +0100239#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200240
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700241#include <linux/cgroup.h>
242
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200243struct cfs_rq;
244
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100245static LIST_HEAD(task_groups);
246
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200247/* task group related information */
Ingo Molnar4cf86d72007-10-15 17:00:14 +0200248struct task_group {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -0700249 struct cgroup_subsys_state css;
Arun R Bharadwaj6c415b92008-12-01 20:49:05 +0530250
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100251#ifdef CONFIG_FAIR_GROUP_SCHED
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200252 /* schedulable entities of this group on each cpu */
253 struct sched_entity **se;
254 /* runqueue "owned" by this group on each cpu */
255 struct cfs_rq **cfs_rq;
256 unsigned long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800257
258 atomic_t load_weight;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100259#endif
260
261#ifdef CONFIG_RT_GROUP_SCHED
262 struct sched_rt_entity **rt_se;
263 struct rt_rq **rt_rq;
264
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200265 struct rt_bandwidth rt_bandwidth;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100266#endif
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +0100267
Srivatsa Vaddagiriae8393e2007-10-29 21:18:11 +0100268 struct rcu_head rcu;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100269 struct list_head list;
Peter Zijlstraf473aa52008-04-19 19:45:00 +0200270
271 struct task_group *parent;
272 struct list_head siblings;
273 struct list_head children;
Mike Galbraith5091faa2010-11-30 14:18:03 +0100274
275#ifdef CONFIG_SCHED_AUTOGROUP
276 struct autogroup *autogroup;
277#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200278};
279
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800280/* task_group_lock serializes the addition/removal of task groups */
Peter Zijlstra8ed36992008-02-13 15:45:39 +0100281static DEFINE_SPINLOCK(task_group_lock);
Srivatsa Vaddagiriec2c5072008-01-25 21:07:59 +0100282
Cyrill Gorcunove9036b32009-10-26 22:24:14 +0300283#ifdef CONFIG_FAIR_GROUP_SCHED
284
Yong Zhang07e06b02011-01-07 15:17:36 +0800285# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
Srivatsa Vaddagiri24e377a2007-10-15 17:00:09 +0200286
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800287/*
Lai Jiangshan2e084782008-06-12 16:42:58 +0800288 * A weight of 0 or 1 can cause arithmetics problems.
289 * A weight of a cfs_rq is the sum of weights of which entities
290 * are queued on this cfs_rq, so a weight of a entity should not be
291 * too large, so as the shares value of a task group.
Miao Xiecb4ad1f2008-04-28 12:54:56 +0800292 * (The default weight is 1024 - so there's no practical
293 * limitation from this.)
294 */
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200295#define MIN_SHARES 2
Lai Jiangshan2e084782008-06-12 16:42:58 +0800296#define MAX_SHARES (1UL << 18)
Peter Zijlstra18d95a22008-04-19 19:45:00 +0200297
Yong Zhang07e06b02011-01-07 15:17:36 +0800298static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100299#endif
300
301/* Default task group.
302 * Every task in system belong to this group at bootup.
303 */
Yong Zhang07e06b02011-01-07 15:17:36 +0800304struct task_group root_task_group;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200305
Dhaval Giani7c941432010-01-20 13:26:18 +0100306#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +0200307
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200308/* CFS-related fields in a runqueue */
309struct cfs_rq {
310 struct load_weight load;
311 unsigned long nr_running;
312
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200313 u64 exec_clock;
Ingo Molnare9acbff2007-10-15 17:00:04 +0200314 u64 min_vruntime;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200315
316 struct rb_root tasks_timeline;
317 struct rb_node *rb_leftmost;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +0200318
319 struct list_head tasks;
320 struct list_head *balance_iterator;
321
322 /*
323 * 'curr' points to currently running entity on this cfs_rq.
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200324 * It is set to NULL otherwise (i.e when none are currently running).
325 */
Rik van Rielac53db52011-02-01 09:51:03 -0500326 struct sched_entity *curr, *next, *last, *skip;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200327
Peter Zijlstra5ac5c4d2008-11-10 10:46:32 +0100328 unsigned int nr_spread_over;
Peter Zijlstraddc97292007-10-15 17:00:10 +0200329
Ingo Molnar62160e3f2007-10-15 17:00:03 +0200330#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200331 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
332
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100333 /*
334 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200335 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
336 * (like users, containers etc.)
337 *
338 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
339 * list is used during load balance.
340 */
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800341 int on_list;
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100342 struct list_head leaf_cfs_rq_list;
343 struct task_group *tg; /* group that "owns" this runqueue */
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200344
345#ifdef CONFIG_SMP
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200346 /*
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200347 * the part of load.weight contributed by tasks
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200348 */
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200349 unsigned long task_weight;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200350
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200351 /*
352 * h_load = weight * f(tg)
353 *
354 * Where f(tg) is the recursive weight fraction assigned to
355 * this group.
356 */
357 unsigned long h_load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200358
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200359 /*
Paul Turner3b3d1902010-11-15 15:47:08 -0800360 * Maintaining per-cpu shares distribution for group scheduling
361 *
362 * load_stamp is the last time we updated the load average
363 * load_last is the last time we updated the load average and saw load
364 * load_unacc_exec_time is currently unaccounted execution time
Peter Zijlstrac8cba852008-06-27 13:41:23 +0200365 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800366 u64 load_avg;
367 u64 load_period;
Paul Turner3b3d1902010-11-15 15:47:08 -0800368 u64 load_stamp, load_last, load_unacc_exec_time;
Peter Zijlstraf1d239f2008-06-27 13:41:38 +0200369
Peter Zijlstra2069dd72010-11-15 15:47:00 -0800370 unsigned long load_contribution;
Peter Zijlstrac09595f2008-06-27 13:41:14 +0200371#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200372#endif
373};
374
375/* Real-Time classes' related field in a runqueue: */
376struct rt_rq {
377 struct rt_prio_array active;
Steven Rostedt63489e42008-01-25 21:08:03 +0100378 unsigned long rt_nr_running;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100379#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -0500380 struct {
381 int curr; /* highest queued rt task prio */
Gregory Haskins398a1532009-01-14 09:10:04 -0500382#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -0500383 int next; /* next highest */
Gregory Haskins398a1532009-01-14 09:10:04 -0500384#endif
Gregory Haskinse864c492008-12-29 09:39:49 -0500385 } highest_prio;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100386#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100387#ifdef CONFIG_SMP
Gregory Haskins73fe6aa2008-01-25 21:08:07 +0100388 unsigned long rt_nr_migratory;
Peter Zijlstraa1ba4d82009-04-01 18:40:15 +0200389 unsigned long rt_nr_total;
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +0100390 int overloaded;
Gregory Haskins917b6272008-12-29 09:39:53 -0500391 struct plist_head pushable_tasks;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100392#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100393 int rt_throttled;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100394 u64 rt_time;
Peter Zijlstraac086bc2008-04-19 19:44:58 +0200395 u64 rt_runtime;
Ingo Molnarea736ed2008-03-25 13:51:45 +0100396 /* Nests inside the rq lock: */
Thomas Gleixner0986b112009-11-17 15:32:06 +0100397 raw_spinlock_t rt_runtime_lock;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100398
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100399#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +0100400 unsigned long rt_nr_boosted;
401
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100402 struct rq *rq;
403 struct list_head leaf_rt_rq_list;
404 struct task_group *tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100405#endif
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200406};
407
Gregory Haskins57d885f2008-01-25 21:08:18 +0100408#ifdef CONFIG_SMP
409
410/*
411 * We add the notion of a root-domain which will be used to define per-domain
Ingo Molnar0eab9142008-01-25 21:08:19 +0100412 * variables. Each exclusive cpuset essentially defines an island domain by
413 * fully partitioning the member cpus from any other cpuset. Whenever a new
Gregory Haskins57d885f2008-01-25 21:08:18 +0100414 * exclusive cpuset is created, we also create and attach a new root-domain
415 * object.
416 *
Gregory Haskins57d885f2008-01-25 21:08:18 +0100417 */
418struct root_domain {
419 atomic_t refcount;
Rusty Russellc6c49272008-11-25 02:35:05 +1030420 cpumask_var_t span;
421 cpumask_var_t online;
Gregory Haskins637f5082008-01-25 21:08:18 +0100422
Ingo Molnar0eab9142008-01-25 21:08:19 +0100423 /*
Gregory Haskins637f5082008-01-25 21:08:18 +0100424 * The "RT overload" flag: it gets set if a CPU has more than
425 * one runnable RT task.
426 */
Rusty Russellc6c49272008-11-25 02:35:05 +1030427 cpumask_var_t rto_mask;
Ingo Molnar0eab9142008-01-25 21:08:19 +0100428 atomic_t rto_count;
Gregory Haskins6e0534f2008-05-12 21:21:01 +0200429 struct cpupri cpupri;
Gregory Haskins57d885f2008-01-25 21:08:18 +0100430};
431
Gregory Haskinsdc938522008-01-25 21:08:26 +0100432/*
433 * By default the system creates a single root-domain with all cpus as
434 * members (mimicking the global state we have today).
435 */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100436static struct root_domain def_root_domain;
437
Christian Dietriched2d3722010-09-06 16:37:05 +0200438#endif /* CONFIG_SMP */
Gregory Haskins57d885f2008-01-25 21:08:18 +0100439
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200440/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441 * This is the main, per-CPU runqueue data structure.
442 *
443 * Locking rule: those places that want to lock multiple runqueues
444 * (such as the load balancing or the thread migration code), lock
445 * acquire operations must be ordered by ascending &runqueue.
446 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700447struct rq {
Ingo Molnard8016492007-10-18 21:32:55 +0200448 /* runqueue lock: */
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100449 raw_spinlock_t lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450
451 /*
452 * nr_running and cpu_load should be in the same cacheline because
453 * remote CPUs use both these fields when doing load calculation.
454 */
455 unsigned long nr_running;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200456 #define CPU_LOAD_IDX_MAX 5
457 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -0700458 unsigned long last_load_update_tick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700459#ifdef CONFIG_NO_HZ
Mike Galbraith39c0cbe2010-03-11 17:17:13 +0100460 u64 nohz_stamp;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -0700461 unsigned char nohz_balance_kick;
Siddha, Suresh B46cb4b72007-05-08 00:32:51 -0700462#endif
Mike Galbraitha64692a2010-03-11 17:16:20 +0100463 unsigned int skip_clock_update;
464
Ingo Molnard8016492007-10-18 21:32:55 +0200465 /* capture load from *all* tasks on this cpu: */
466 struct load_weight load;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200467 unsigned long nr_load_updates;
468 u64 nr_switches;
469
470 struct cfs_rq cfs;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100471 struct rt_rq rt;
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100472
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200473#ifdef CONFIG_FAIR_GROUP_SCHED
Ingo Molnard8016492007-10-18 21:32:55 +0200474 /* list of leaf cfs_rq on this cpu: */
475 struct list_head leaf_cfs_rq_list;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +0100476#endif
477#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +0100478 struct list_head leaf_rt_rq_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480
481 /*
482 * This is part of a global counter where only the total sum
483 * over all CPUs matters. A task can increase this counter on
484 * one CPU and if it got migrated afterwards it may decrease
485 * it on another CPU. Always updated under the runqueue lock:
486 */
487 unsigned long nr_uninterruptible;
488
Peter Zijlstra34f971f2010-09-22 13:53:15 +0200489 struct task_struct *curr, *idle, *stop;
Christoph Lameterc9819f42006-12-10 02:20:25 -0800490 unsigned long next_balance;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 struct mm_struct *prev_mm;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200492
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200493 u64 clock;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700494 u64 clock_task;
Ingo Molnar6aa645e2007-07-09 18:51:58 +0200495
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496 atomic_t nr_iowait;
497
498#ifdef CONFIG_SMP
Ingo Molnar0eab9142008-01-25 21:08:19 +0100499 struct root_domain *rd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 struct sched_domain *sd;
501
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +0200502 unsigned long cpu_power;
503
Henrik Austada0a522c2009-02-13 20:35:45 +0100504 unsigned char idle_at_tick;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 /* For active balancing */
Gregory Haskins3f029d32009-07-29 11:08:47 -0400506 int post_schedule;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 int active_balance;
508 int push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +0200509 struct cpu_stop_work active_balance_work;
Ingo Molnard8016492007-10-18 21:32:55 +0200510 /* cpu of this runqueue: */
511 int cpu;
Gregory Haskins1f11eb62008-06-04 15:04:05 -0400512 int online;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513
Peter Zijlstraa8a51d52008-06-27 13:41:26 +0200514 unsigned long avg_load_per_task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200516 u64 rt_avg;
517 u64 age_stamp;
Mike Galbraith1b9508f2009-11-04 17:53:50 +0100518 u64 idle_stamp;
519 u64 avg_idle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520#endif
521
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -0700522#ifdef CONFIG_IRQ_TIME_ACCOUNTING
523 u64 prev_irq_time;
524#endif
525
Thomas Gleixnerdce48a82009-04-11 10:43:41 +0200526 /* calc_load related fields */
527 unsigned long calc_load_update;
528 long calc_load_active;
529
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100530#ifdef CONFIG_SCHED_HRTICK
Peter Zijlstra31656512008-07-18 18:01:23 +0200531#ifdef CONFIG_SMP
532 int hrtick_csd_pending;
533 struct call_single_data hrtick_csd;
534#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100535 struct hrtimer hrtick_timer;
536#endif
537
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538#ifdef CONFIG_SCHEDSTATS
539 /* latency stats */
540 struct sched_info rq_sched_info;
Ken Chen9c2c4802008-12-16 23:41:22 -0800541 unsigned long long rq_cpu_time;
542 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543
544 /* sys_sched_yield() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200545 unsigned int yld_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546
547 /* schedule() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200548 unsigned int sched_switch;
549 unsigned int sched_count;
550 unsigned int sched_goidle;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551
552 /* try_to_wake_up() stats */
Ken Chen480b9432007-10-18 21:32:56 +0200553 unsigned int ttwu_count;
554 unsigned int ttwu_local;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555#endif
556};
557
Fenghua Yuf34e3b62007-07-19 01:48:13 -0700558static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559
Mike Galbraitha64692a2010-03-11 17:16:20 +0100560
Peter Zijlstra1e5a7402010-10-31 12:37:04 +0100561static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
Ingo Molnardd41f592007-07-09 18:51:59 +0200562
Christoph Lameter0a2966b2006-09-25 23:30:51 -0700563static inline int cpu_of(struct rq *rq)
564{
565#ifdef CONFIG_SMP
566 return rq->cpu;
567#else
568 return 0;
569#endif
570}
571
Paul E. McKenney497f0ab2010-02-22 17:04:51 -0800572#define rcu_dereference_check_sched_domain(p) \
Paul E. McKenneyd11c5632010-02-22 17:04:50 -0800573 rcu_dereference_check((p), \
574 rcu_read_lock_sched_held() || \
575 lockdep_is_held(&sched_domains_mutex))
576
Ingo Molnar20d315d2007-07-09 18:51:58 +0200577/*
Nick Piggin674311d2005-06-25 14:57:27 -0700578 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -0700579 * See detach_destroy_domains: synchronize_sched for details.
Nick Piggin674311d2005-06-25 14:57:27 -0700580 *
581 * The domain tree of any CPU may only be accessed from within
582 * preempt-disabled sections.
583 */
Ingo Molnar48f24c42006-07-03 00:25:40 -0700584#define for_each_domain(cpu, __sd) \
Paul E. McKenney497f0ab2010-02-22 17:04:51 -0800585 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586
587#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
588#define this_rq() (&__get_cpu_var(runqueues))
589#define task_rq(p) cpu_rq(task_cpu(p))
590#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
Hitoshi Mitake54d35f22009-06-29 14:44:57 +0900591#define raw_rq() (&__raw_get_cpu_var(runqueues))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200593#ifdef CONFIG_CGROUP_SCHED
594
595/*
596 * Return the group to which this tasks belongs.
597 *
598 * We use task_subsys_state_check() and extend the RCU verification
599 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
600 * holds that lock for each task it moves into the cgroup. Therefore
601 * by holding that lock, we pin the task to the current cgroup.
602 */
603static inline struct task_group *task_group(struct task_struct *p)
604{
Mike Galbraith5091faa2010-11-30 14:18:03 +0100605 struct task_group *tg;
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200606 struct cgroup_subsys_state *css;
607
608 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
609 lockdep_is_held(&task_rq(p)->lock));
Mike Galbraith5091faa2010-11-30 14:18:03 +0100610 tg = container_of(css, struct task_group, css);
611
612 return autogroup_task_group(p, tg);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +0200613}
614
615/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
616static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
617{
618#ifdef CONFIG_FAIR_GROUP_SCHED
619 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
620 p->se.parent = task_group(p)->se[cpu];
621#endif
622
623#ifdef CONFIG_RT_GROUP_SCHED
624 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
625 p->rt.parent = task_group(p)->rt_se[cpu];
626#endif
627}
628
629#else /* CONFIG_CGROUP_SCHED */
630
631static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
632static inline struct task_group *task_group(struct task_struct *p)
633{
634 return NULL;
635}
636
637#endif /* CONFIG_CGROUP_SCHED */
638
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100639static void update_rq_clock_task(struct rq *rq, s64 delta);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700640
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100641static void update_rq_clock(struct rq *rq)
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200642{
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100643 s64 delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -0700644
Mike Galbraithf26f9af2010-12-08 11:05:42 +0100645 if (rq->skip_clock_update)
646 return;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -0700647
Peter Zijlstrafe44d622010-12-09 14:15:34 +0100648 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
649 rq->clock += delta;
650 update_rq_clock_task(rq, delta);
Peter Zijlstra3e51f332008-05-03 18:29:28 +0200651}
652
Ingo Molnare436d802007-07-19 21:28:35 +0200653/*
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200654 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
655 */
656#ifdef CONFIG_SCHED_DEBUG
657# define const_debug __read_mostly
658#else
659# define const_debug static const
660#endif
661
Ingo Molnar017730c2008-05-12 21:20:52 +0200662/**
Randy Dunlap1fd06bb2011-03-15 16:12:30 -0700663 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
Randy Dunlape17b38b2009-10-11 19:12:00 -0700664 * @cpu: the processor in question.
Ingo Molnar017730c2008-05-12 21:20:52 +0200665 *
Ingo Molnar017730c2008-05-12 21:20:52 +0200666 * This interface allows printk to be called with the runqueue lock
667 * held and know whether or not it is OK to wake up the klogd.
668 */
Andrew Morton89f19f02009-09-19 11:55:44 -0700669int runqueue_is_locked(int cpu)
Ingo Molnar017730c2008-05-12 21:20:52 +0200670{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100671 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
Ingo Molnar017730c2008-05-12 21:20:52 +0200672}
673
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200674/*
675 * Debugging: various feature bits
676 */
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200677
678#define SCHED_FEAT(name, enabled) \
679 __SCHED_FEAT_##name ,
680
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200681enum {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200682#include "sched_features.h"
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200683};
684
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200685#undef SCHED_FEAT
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200686
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200687#define SCHED_FEAT(name, enabled) \
688 (1UL << __SCHED_FEAT_##name) * enabled |
689
690const_debug unsigned int sysctl_sched_features =
691#include "sched_features.h"
692 0;
693
694#undef SCHED_FEAT
695
696#ifdef CONFIG_SCHED_DEBUG
697#define SCHED_FEAT(name, enabled) \
698 #name ,
699
Harvey Harrison983ed7a2008-04-24 18:17:55 -0700700static __read_mostly char *sched_feat_names[] = {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200701#include "sched_features.h"
702 NULL
703};
704
705#undef SCHED_FEAT
706
Li Zefan34f3a812008-10-30 15:23:32 +0800707static int sched_feat_show(struct seq_file *m, void *v)
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200708{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200709 int i;
710
711 for (i = 0; sched_feat_names[i]; i++) {
Li Zefan34f3a812008-10-30 15:23:32 +0800712 if (!(sysctl_sched_features & (1UL << i)))
713 seq_puts(m, "NO_");
714 seq_printf(m, "%s ", sched_feat_names[i]);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200715 }
Li Zefan34f3a812008-10-30 15:23:32 +0800716 seq_puts(m, "\n");
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200717
Li Zefan34f3a812008-10-30 15:23:32 +0800718 return 0;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200719}
720
721static ssize_t
722sched_feat_write(struct file *filp, const char __user *ubuf,
723 size_t cnt, loff_t *ppos)
724{
725 char buf[64];
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400726 char *cmp;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200727 int neg = 0;
728 int i;
729
730 if (cnt > 63)
731 cnt = 63;
732
733 if (copy_from_user(&buf, ubuf, cnt))
734 return -EFAULT;
735
736 buf[cnt] = 0;
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400737 cmp = strstrip(buf);
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200738
Hillf Danton524429c2011-01-06 20:58:12 +0800739 if (strncmp(cmp, "NO_", 3) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200740 neg = 1;
741 cmp += 3;
742 }
743
744 for (i = 0; sched_feat_names[i]; i++) {
Mathieu Desnoyers77401912010-09-13 17:47:00 -0400745 if (strcmp(cmp, sched_feat_names[i]) == 0) {
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200746 if (neg)
747 sysctl_sched_features &= ~(1UL << i);
748 else
749 sysctl_sched_features |= (1UL << i);
750 break;
751 }
752 }
753
754 if (!sched_feat_names[i])
755 return -EINVAL;
756
Jan Blunck42994722009-11-20 17:40:37 +0100757 *ppos += cnt;
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200758
759 return cnt;
760}
761
Li Zefan34f3a812008-10-30 15:23:32 +0800762static int sched_feat_open(struct inode *inode, struct file *filp)
763{
764 return single_open(filp, sched_feat_show, NULL);
765}
766
Alexey Dobriyan828c0952009-10-01 15:43:56 -0700767static const struct file_operations sched_feat_fops = {
Li Zefan34f3a812008-10-30 15:23:32 +0800768 .open = sched_feat_open,
769 .write = sched_feat_write,
770 .read = seq_read,
771 .llseek = seq_lseek,
772 .release = single_release,
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200773};
774
775static __init int sched_init_debug(void)
776{
Peter Zijlstraf00b45c2008-04-19 19:45:00 +0200777 debugfs_create_file("sched_features", 0644, NULL, NULL,
778 &sched_feat_fops);
779
780 return 0;
781}
782late_initcall(sched_init_debug);
783
784#endif
785
786#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
Ingo Molnarbf5c91b2007-10-15 17:00:04 +0200787
788/*
Peter Zijlstrab82d9fd2007-11-09 22:39:39 +0100789 * Number of tasks to iterate in a single balance run.
790 * Limited because this is done with IRQs disabled.
791 */
792const_debug unsigned int sysctl_sched_nr_migrate = 32;
793
794/*
Peter Zijlstrae9e92502009-09-01 10:34:37 +0200795 * period over which we average the RT time consumption, measured
796 * in ms.
797 *
798 * default: 1s
799 */
800const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
801
802/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100803 * period over which we measure -rt task cpu usage in us.
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100804 * default: 1s
805 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100806unsigned int sysctl_sched_rt_period = 1000000;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100807
Ingo Molnar6892b752008-02-13 14:02:36 +0100808static __read_mostly int scheduler_running;
809
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100810/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100811 * part of the period that we allow rt tasks to run in us.
812 * default: 0.95s
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100813 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +0100814int sysctl_sched_rt_runtime = 950000;
815
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200816static inline u64 global_rt_period(void)
817{
818 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
819}
820
821static inline u64 global_rt_runtime(void)
822{
roel kluine26873b2008-07-22 16:51:15 -0400823 if (sysctl_sched_rt_runtime < 0)
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +0200824 return RUNTIME_INF;
825
826 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
827}
Peter Zijlstrafa85ae22008-01-25 21:08:29 +0100828
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829#ifndef prepare_arch_switch
Nick Piggin4866cde2005-06-25 14:57:23 -0700830# define prepare_arch_switch(next) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831#endif
Nick Piggin4866cde2005-06-25 14:57:23 -0700832#ifndef finish_arch_switch
833# define finish_arch_switch(prev) do { } while (0)
834#endif
835
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100836static inline int task_current(struct rq *rq, struct task_struct *p)
837{
838 return rq->curr == p;
839}
840
Nick Piggin4866cde2005-06-25 14:57:23 -0700841#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar70b97a72006-07-03 00:25:42 -0700842static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700843{
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100844 return task_current(rq, p);
Nick Piggin4866cde2005-06-25 14:57:23 -0700845}
846
Ingo Molnar70b97a72006-07-03 00:25:42 -0700847static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700848{
849}
850
Ingo Molnar70b97a72006-07-03 00:25:42 -0700851static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700852{
Ingo Molnarda04c032005-09-13 11:17:59 +0200853#ifdef CONFIG_DEBUG_SPINLOCK
854 /* this is a valid case when another task releases the spinlock */
855 rq->lock.owner = current;
856#endif
Ingo Molnar8a25d5d2006-07-03 00:24:54 -0700857 /*
858 * If we are tracking spinlock dependencies then we have to
859 * fix up the runqueue lock - which gets 'carried over' from
860 * prev into current:
861 */
862 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
863
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100864 raw_spin_unlock_irq(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700865}
866
867#else /* __ARCH_WANT_UNLOCKED_CTXSW */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700868static inline int task_running(struct rq *rq, struct task_struct *p)
Nick Piggin4866cde2005-06-25 14:57:23 -0700869{
870#ifdef CONFIG_SMP
871 return p->oncpu;
872#else
Dmitry Adamushko051a1d12007-12-18 15:21:13 +0100873 return task_current(rq, p);
Nick Piggin4866cde2005-06-25 14:57:23 -0700874#endif
875}
876
Ingo Molnar70b97a72006-07-03 00:25:42 -0700877static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -0700878{
879#ifdef CONFIG_SMP
880 /*
881 * We can optimise this out completely for !SMP, because the
882 * SMP rebalancing from interrupt is the only thing that cares
883 * here.
884 */
885 next->oncpu = 1;
886#endif
887#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100888 raw_spin_unlock_irq(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700889#else
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100890 raw_spin_unlock(&rq->lock);
Nick Piggin4866cde2005-06-25 14:57:23 -0700891#endif
892}
893
Ingo Molnar70b97a72006-07-03 00:25:42 -0700894static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
Nick Piggin4866cde2005-06-25 14:57:23 -0700895{
896#ifdef CONFIG_SMP
897 /*
898 * After ->oncpu is cleared, the task can be moved to a different CPU.
899 * We must ensure this doesn't happen until the switch is completely
900 * finished.
901 */
902 smp_wmb();
903 prev->oncpu = 0;
904#endif
905#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
906 local_irq_enable();
907#endif
908}
909#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910
911/*
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100912 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
913 * against ttwu().
Peter Zijlstra0970d292010-02-15 14:45:54 +0100914 */
915static inline int task_is_waking(struct task_struct *p)
916{
Peter Zijlstra0017d732010-03-24 18:34:10 +0100917 return unlikely(p->state == TASK_WAKING);
Peter Zijlstra0970d292010-02-15 14:45:54 +0100918}
919
920/*
Ingo Molnarb29739f2006-06-27 02:54:51 -0700921 * __task_rq_lock - lock the runqueue a given task resides on.
922 * Must be called interrupts disabled.
923 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700924static inline struct rq *__task_rq_lock(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700925 __acquires(rq->lock)
926{
Peter Zijlstra0970d292010-02-15 14:45:54 +0100927 struct rq *rq;
928
Andi Kleen3a5c3592007-10-15 17:00:14 +0200929 for (;;) {
Peter Zijlstra0970d292010-02-15 14:45:54 +0100930 rq = task_rq(p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100931 raw_spin_lock(&rq->lock);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100932 if (likely(rq == task_rq(p)))
Andi Kleen3a5c3592007-10-15 17:00:14 +0200933 return rq;
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100934 raw_spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700935 }
Ingo Molnarb29739f2006-06-27 02:54:51 -0700936}
937
938/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939 * task_rq_lock - lock the runqueue a given task resides on and disable
Ingo Molnar41a2d6c2007-12-05 15:46:09 +0100940 * interrupts. Note the ordering: we can safely lookup the task_rq without
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941 * explicitly disabling preemption.
942 */
Ingo Molnar70b97a72006-07-03 00:25:42 -0700943static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944 __acquires(rq->lock)
945{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700946 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947
Andi Kleen3a5c3592007-10-15 17:00:14 +0200948 for (;;) {
949 local_irq_save(*flags);
950 rq = task_rq(p);
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100951 raw_spin_lock(&rq->lock);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +0100952 if (likely(rq == task_rq(p)))
Andi Kleen3a5c3592007-10-15 17:00:14 +0200953 return rq;
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100954 raw_spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956}
957
Alexey Dobriyana9957442007-10-15 17:00:13 +0200958static void __task_rq_unlock(struct rq *rq)
Ingo Molnarb29739f2006-06-27 02:54:51 -0700959 __releases(rq->lock)
960{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100961 raw_spin_unlock(&rq->lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -0700962}
963
Ingo Molnar70b97a72006-07-03 00:25:42 -0700964static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 __releases(rq->lock)
966{
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100967 raw_spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968}
969
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970/*
Robert P. J. Daycc2a73b2006-12-10 02:20:00 -0800971 * this_rq_lock - lock this runqueue and disable interrupts.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972 */
Alexey Dobriyana9957442007-10-15 17:00:13 +0200973static struct rq *this_rq_lock(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 __acquires(rq->lock)
975{
Ingo Molnar70b97a72006-07-03 00:25:42 -0700976 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977
978 local_irq_disable();
979 rq = this_rq();
Thomas Gleixner05fa7852009-11-17 14:28:38 +0100980 raw_spin_lock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981
982 return rq;
983}
984
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100985#ifdef CONFIG_SCHED_HRTICK
986/*
987 * Use HR-timers to deliver accurate preemption points.
988 *
989 * Its all a bit involved since we cannot program an hrt while holding the
990 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
991 * reschedule event.
992 *
993 * When we get rescheduled we reprogram the hrtick_timer outside of the
994 * rq->lock.
995 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +0100996
997/*
998 * Use hrtick when:
999 * - enabled by features
1000 * - hrtimer is actually high res
1001 */
1002static inline int hrtick_enabled(struct rq *rq)
1003{
1004 if (!sched_feat(HRTICK))
1005 return 0;
Ingo Molnarba420592008-07-20 11:02:06 +02001006 if (!cpu_active(cpu_of(rq)))
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001007 return 0;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001008 return hrtimer_is_hres_active(&rq->hrtick_timer);
1009}
1010
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001011static void hrtick_clear(struct rq *rq)
1012{
1013 if (hrtimer_active(&rq->hrtick_timer))
1014 hrtimer_cancel(&rq->hrtick_timer);
1015}
1016
1017/*
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001018 * High-resolution timer tick.
1019 * Runs from hardirq context with interrupts disabled.
1020 */
1021static enum hrtimer_restart hrtick(struct hrtimer *timer)
1022{
1023 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1024
1025 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1026
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001027 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02001028 update_rq_clock(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001029 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001030 raw_spin_unlock(&rq->lock);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001031
1032 return HRTIMER_NORESTART;
1033}
1034
Rabin Vincent95e904c2008-05-11 05:55:33 +05301035#ifdef CONFIG_SMP
Peter Zijlstra31656512008-07-18 18:01:23 +02001036/*
1037 * called from hardirq (IPI) context
1038 */
1039static void __hrtick_start(void *arg)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001040{
Peter Zijlstra31656512008-07-18 18:01:23 +02001041 struct rq *rq = arg;
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001042
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001043 raw_spin_lock(&rq->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001044 hrtimer_restart(&rq->hrtick_timer);
1045 rq->hrtick_csd_pending = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001046 raw_spin_unlock(&rq->lock);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001047}
1048
Peter Zijlstra31656512008-07-18 18:01:23 +02001049/*
1050 * Called to set the hrtick timer state.
1051 *
1052 * called with rq->lock held and irqs disabled
1053 */
1054static void hrtick_start(struct rq *rq, u64 delay)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001055{
Peter Zijlstra31656512008-07-18 18:01:23 +02001056 struct hrtimer *timer = &rq->hrtick_timer;
1057 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001058
Arjan van de Vencc584b22008-09-01 15:02:30 -07001059 hrtimer_set_expires(timer, time);
Peter Zijlstra31656512008-07-18 18:01:23 +02001060
1061 if (rq == this_rq()) {
1062 hrtimer_restart(timer);
1063 } else if (!rq->hrtick_csd_pending) {
Peter Zijlstra6e275632009-02-25 13:59:48 +01001064 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001065 rq->hrtick_csd_pending = 1;
1066 }
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001067}
1068
1069static int
1070hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1071{
1072 int cpu = (int)(long)hcpu;
1073
1074 switch (action) {
1075 case CPU_UP_CANCELED:
1076 case CPU_UP_CANCELED_FROZEN:
1077 case CPU_DOWN_PREPARE:
1078 case CPU_DOWN_PREPARE_FROZEN:
1079 case CPU_DEAD:
1080 case CPU_DEAD_FROZEN:
Peter Zijlstra31656512008-07-18 18:01:23 +02001081 hrtick_clear(cpu_rq(cpu));
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001082 return NOTIFY_OK;
1083 }
1084
1085 return NOTIFY_DONE;
1086}
1087
Rakib Mullickfa748202008-09-22 14:55:45 -07001088static __init void init_hrtick(void)
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001089{
1090 hotcpu_notifier(hotplug_hrtick, 0);
1091}
Peter Zijlstra31656512008-07-18 18:01:23 +02001092#else
1093/*
1094 * Called to set the hrtick timer state.
1095 *
1096 * called with rq->lock held and irqs disabled
1097 */
1098static void hrtick_start(struct rq *rq, u64 delay)
1099{
Peter Zijlstra7f1e2ca2009-03-13 12:21:27 +01001100 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
Arun R Bharadwaj5c333862009-04-16 12:14:37 +05301101 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstra31656512008-07-18 18:01:23 +02001102}
1103
Andrew Morton006c75f2008-09-22 14:55:46 -07001104static inline void init_hrtick(void)
Peter Zijlstra31656512008-07-18 18:01:23 +02001105{
1106}
Rabin Vincent95e904c2008-05-11 05:55:33 +05301107#endif /* CONFIG_SMP */
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001108
1109static void init_rq_hrtick(struct rq *rq)
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001110{
Peter Zijlstra31656512008-07-18 18:01:23 +02001111#ifdef CONFIG_SMP
1112 rq->hrtick_csd_pending = 0;
1113
1114 rq->hrtick_csd.flags = 0;
1115 rq->hrtick_csd.func = __hrtick_start;
1116 rq->hrtick_csd.info = rq;
1117#endif
1118
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001119 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1120 rq->hrtick_timer.function = hrtick;
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001121}
Andrew Morton006c75f2008-09-22 14:55:46 -07001122#else /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001123static inline void hrtick_clear(struct rq *rq)
1124{
1125}
1126
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001127static inline void init_rq_hrtick(struct rq *rq)
1128{
1129}
1130
Peter Zijlstrab328ca12008-04-29 10:02:46 +02001131static inline void init_hrtick(void)
1132{
1133}
Andrew Morton006c75f2008-09-22 14:55:46 -07001134#endif /* CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001135
Ingo Molnar1b9f19c2007-07-09 18:51:59 +02001136/*
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001137 * resched_task - mark a task 'to be rescheduled now'.
1138 *
1139 * On UP this means the setting of the need_resched flag, on SMP it
1140 * might also involve a cross-CPU call to trigger the scheduler on
1141 * the target CPU.
1142 */
1143#ifdef CONFIG_SMP
1144
1145#ifndef tsk_is_polling
1146#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1147#endif
1148
Peter Zijlstra31656512008-07-18 18:01:23 +02001149static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001150{
1151 int cpu;
1152
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001153 assert_raw_spin_locked(&task_rq(p)->lock);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001154
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001155 if (test_tsk_need_resched(p))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001156 return;
1157
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001158 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001159
1160 cpu = task_cpu(p);
1161 if (cpu == smp_processor_id())
1162 return;
1163
1164 /* NEED_RESCHED must be visible before we test polling */
1165 smp_mb();
1166 if (!tsk_is_polling(p))
1167 smp_send_reschedule(cpu);
1168}
1169
1170static void resched_cpu(int cpu)
1171{
1172 struct rq *rq = cpu_rq(cpu);
1173 unsigned long flags;
1174
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001175 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001176 return;
1177 resched_task(cpu_curr(cpu));
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001178 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001179}
Thomas Gleixner06d83082008-03-22 09:20:24 +01001180
1181#ifdef CONFIG_NO_HZ
1182/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07001183 * In the semi idle case, use the nearest busy cpu for migrating timers
1184 * from an idle cpu. This is good for power-savings.
1185 *
1186 * We don't do similar optimization for completely idle system, as
1187 * selecting an idle cpu will add more delays to the timers than intended
1188 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
1189 */
1190int get_nohz_timer_target(void)
1191{
1192 int cpu = smp_processor_id();
1193 int i;
1194 struct sched_domain *sd;
1195
1196 for_each_domain(cpu, sd) {
1197 for_each_cpu(i, sched_domain_span(sd))
1198 if (!idle_cpu(i))
1199 return i;
1200 }
1201 return cpu;
1202}
1203/*
Thomas Gleixner06d83082008-03-22 09:20:24 +01001204 * When add_timer_on() enqueues a timer into the timer wheel of an
1205 * idle CPU then this timer might expire before the next timer event
1206 * which is scheduled to wake up that CPU. In case of a completely
1207 * idle system the next event might even be infinite time into the
1208 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1209 * leaves the inner idle loop so the newly added timer is taken into
1210 * account when the CPU goes back to idle and evaluates the timer
1211 * wheel for the next timer event.
1212 */
1213void wake_up_idle_cpu(int cpu)
1214{
1215 struct rq *rq = cpu_rq(cpu);
1216
1217 if (cpu == smp_processor_id())
1218 return;
1219
1220 /*
1221 * This is safe, as this function is called with the timer
1222 * wheel base lock of (cpu) held. When the CPU is on the way
1223 * to idle and has not yet set rq->curr to idle then it will
1224 * be serialized on the timer wheel base lock and take the new
1225 * timer into account automatically.
1226 */
1227 if (rq->curr != rq->idle)
1228 return;
1229
1230 /*
1231 * We can set TIF_RESCHED on the idle task of the other CPU
1232 * lockless. The worst case is that the other CPU runs the
1233 * idle task through an additional NOOP schedule()
1234 */
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08001235 set_tsk_need_resched(rq->idle);
Thomas Gleixner06d83082008-03-22 09:20:24 +01001236
1237 /* NEED_RESCHED must be visible before we test polling */
1238 smp_mb();
1239 if (!tsk_is_polling(rq->idle))
1240 smp_send_reschedule(cpu);
1241}
Mike Galbraith39c0cbe2010-03-11 17:17:13 +01001242
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001243#endif /* CONFIG_NO_HZ */
Thomas Gleixner06d83082008-03-22 09:20:24 +01001244
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001245static u64 sched_avg_period(void)
1246{
1247 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1248}
1249
1250static void sched_avg_update(struct rq *rq)
1251{
1252 s64 period = sched_avg_period();
1253
1254 while ((s64)(rq->clock - rq->age_stamp) > period) {
Will Deacon0d98bb22010-05-24 12:11:43 -07001255 /*
1256 * Inline assembly required to prevent the compiler
1257 * optimising this loop into a divmod call.
1258 * See __iter_div_u64_rem() for another example of this.
1259 */
1260 asm("" : "+rm" (rq->age_stamp));
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001261 rq->age_stamp += period;
1262 rq->rt_avg /= 2;
1263 }
1264}
1265
1266static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1267{
1268 rq->rt_avg += rt_delta;
1269 sched_avg_update(rq);
1270}
1271
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001272#else /* !CONFIG_SMP */
Peter Zijlstra31656512008-07-18 18:01:23 +02001273static void resched_task(struct task_struct *p)
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001274{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001275 assert_raw_spin_locked(&task_rq(p)->lock);
Peter Zijlstra31656512008-07-18 18:01:23 +02001276 set_tsk_need_resched(p);
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001277}
Peter Zijlstrae9e92502009-09-01 10:34:37 +02001278
1279static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1280{
1281}
Suresh Siddhada2b71e2010-08-23 13:42:51 -07001282
1283static void sched_avg_update(struct rq *rq)
1284{
1285}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001286#endif /* CONFIG_SMP */
Ingo Molnarc24d20d2007-07-09 18:51:59 +02001287
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001288#if BITS_PER_LONG == 32
1289# define WMULT_CONST (~0UL)
1290#else
1291# define WMULT_CONST (1UL << 32)
1292#endif
1293
1294#define WMULT_SHIFT 32
1295
Ingo Molnar194081e2007-08-09 11:16:51 +02001296/*
1297 * Shift right and round:
1298 */
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001299#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
Ingo Molnar194081e2007-08-09 11:16:51 +02001300
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001301/*
1302 * delta *= weight / lw
1303 */
Ingo Molnarcb1c4fc2007-08-02 17:41:40 +02001304static unsigned long
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001305calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1306 struct load_weight *lw)
1307{
1308 u64 tmp;
1309
Lai Jiangshan7a232e02008-06-12 16:43:07 +08001310 if (!lw->inv_weight) {
1311 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1312 lw->inv_weight = 1;
1313 else
1314 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1315 / (lw->weight+1);
1316 }
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001317
1318 tmp = (u64)delta_exec * weight;
1319 /*
1320 * Check whether we'd overflow the 64-bit multiplication:
1321 */
Ingo Molnar194081e2007-08-09 11:16:51 +02001322 if (unlikely(tmp > WMULT_CONST))
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001323 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
Ingo Molnar194081e2007-08-09 11:16:51 +02001324 WMULT_SHIFT/2);
1325 else
Ingo Molnarcf2ab462007-09-05 14:32:49 +02001326 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001327
Ingo Molnarecf691d2007-08-02 17:41:40 +02001328 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001329}
1330
Ingo Molnar10919852007-10-15 17:00:04 +02001331static inline void update_load_add(struct load_weight *lw, unsigned long inc)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001332{
1333 lw->weight += inc;
Ingo Molnare89996a2008-03-14 23:48:28 +01001334 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001335}
1336
Ingo Molnar10919852007-10-15 17:00:04 +02001337static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001338{
1339 lw->weight -= dec;
Ingo Molnare89996a2008-03-14 23:48:28 +01001340 lw->inv_weight = 0;
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001341}
1342
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001343static inline void update_load_set(struct load_weight *lw, unsigned long w)
1344{
1345 lw->weight = w;
1346 lw->inv_weight = 0;
1347}
1348
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349/*
Peter Williams2dd73a42006-06-27 02:54:34 -07001350 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1351 * of tasks with abnormal "nice" values across CPUs the contribution that
1352 * each task makes to its run queue's load is weighted according to its
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001353 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
Peter Williams2dd73a42006-06-27 02:54:34 -07001354 * scaled version of the new time slice allocation that they receive on time
1355 * slice expiry etc.
1356 */
1357
Peter Zijlstracce7ade2009-01-15 14:53:37 +01001358#define WEIGHT_IDLEPRIO 3
1359#define WMULT_IDLEPRIO 1431655765
Ingo Molnardd41f592007-07-09 18:51:59 +02001360
1361/*
1362 * Nice levels are multiplicative, with a gentle 10% change for every
1363 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1364 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1365 * that remained on nice 0.
1366 *
1367 * The "10% effect" is relative and cumulative: from _any_ nice level,
1368 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
Ingo Molnarf9153ee2007-07-16 09:46:30 +02001369 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1370 * If a task goes up by ~10% and another task goes down by ~10% then
1371 * the relative distance between them is ~25%.)
Ingo Molnardd41f592007-07-09 18:51:59 +02001372 */
1373static const int prio_to_weight[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001374 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1375 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1376 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1377 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1378 /* 0 */ 1024, 820, 655, 526, 423,
1379 /* 5 */ 335, 272, 215, 172, 137,
1380 /* 10 */ 110, 87, 70, 56, 45,
1381 /* 15 */ 36, 29, 23, 18, 15,
Ingo Molnardd41f592007-07-09 18:51:59 +02001382};
1383
Ingo Molnar5714d2d2007-07-16 09:46:31 +02001384/*
1385 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1386 *
1387 * In cases where the weight does not change often, we can use the
1388 * precalculated inverse to speed up arithmetics by turning divisions
1389 * into multiplications:
1390 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001391static const u32 prio_to_wmult[40] = {
Ingo Molnar254753d2007-08-09 11:16:51 +02001392 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1393 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1394 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1395 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1396 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1397 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1398 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1399 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
Ingo Molnardd41f592007-07-09 18:51:59 +02001400};
Peter Williams2dd73a42006-06-27 02:54:34 -07001401
Bharata B Raoef12fef2009-03-31 10:02:22 +05301402/* Time spent by the tasks of the cpu accounting group executing in ... */
1403enum cpuacct_stat_index {
1404 CPUACCT_STAT_USER, /* ... user mode */
1405 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1406
1407 CPUACCT_STAT_NSTATS,
1408};
1409
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001410#ifdef CONFIG_CGROUP_CPUACCT
1411static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
Bharata B Raoef12fef2009-03-31 10:02:22 +05301412static void cpuacct_update_stats(struct task_struct *tsk,
1413 enum cpuacct_stat_index idx, cputime_t val);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001414#else
1415static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
Bharata B Raoef12fef2009-03-31 10:02:22 +05301416static inline void cpuacct_update_stats(struct task_struct *tsk,
1417 enum cpuacct_stat_index idx, cputime_t val) {}
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01001418#endif
1419
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001420static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1421{
1422 update_load_add(&rq->load, load);
1423}
1424
1425static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1426{
1427 update_load_sub(&rq->load, load);
1428}
1429
Ingo Molnar7940ca32008-08-19 13:40:47 +02001430#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
Peter Zijlstraeb755802008-08-19 12:33:05 +02001431typedef int (*tg_visitor)(struct task_group *, void *);
1432
1433/*
1434 * Iterate the full tree, calling @down when first entering a node and @up when
1435 * leaving it for the final time.
1436 */
1437static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1438{
1439 struct task_group *parent, *child;
1440 int ret;
1441
1442 rcu_read_lock();
1443 parent = &root_task_group;
1444down:
1445 ret = (*down)(parent, data);
1446 if (ret)
1447 goto out_unlock;
1448 list_for_each_entry_rcu(child, &parent->children, siblings) {
1449 parent = child;
1450 goto down;
1451
1452up:
1453 continue;
1454 }
1455 ret = (*up)(parent, data);
1456 if (ret)
1457 goto out_unlock;
1458
1459 child = parent;
1460 parent = parent->parent;
1461 if (parent)
1462 goto up;
1463out_unlock:
1464 rcu_read_unlock();
1465
1466 return ret;
1467}
1468
1469static int tg_nop(struct task_group *tg, void *data)
1470{
1471 return 0;
1472}
1473#endif
1474
Gregory Haskinse7693a32008-01-25 21:08:09 +01001475#ifdef CONFIG_SMP
Peter Zijlstraf5f08f32009-09-10 13:35:28 +02001476/* Used instead of source_load when we know the type == 0 */
1477static unsigned long weighted_cpuload(const int cpu)
1478{
1479 return cpu_rq(cpu)->load.weight;
1480}
1481
1482/*
1483 * Return a low guess at the load of a migration-source cpu weighted
1484 * according to the scheduling class and "nice" value.
1485 *
1486 * We want to under-estimate the load of migration sources, to
1487 * balance conservatively.
1488 */
1489static unsigned long source_load(int cpu, int type)
1490{
1491 struct rq *rq = cpu_rq(cpu);
1492 unsigned long total = weighted_cpuload(cpu);
1493
1494 if (type == 0 || !sched_feat(LB_BIAS))
1495 return total;
1496
1497 return min(rq->cpu_load[type-1], total);
1498}
1499
1500/*
1501 * Return a high guess at the load of a migration-target cpu weighted
1502 * according to the scheduling class and "nice" value.
1503 */
1504static unsigned long target_load(int cpu, int type)
1505{
1506 struct rq *rq = cpu_rq(cpu);
1507 unsigned long total = weighted_cpuload(cpu);
1508
1509 if (type == 0 || !sched_feat(LB_BIAS))
1510 return total;
1511
1512 return max(rq->cpu_load[type-1], total);
1513}
1514
Peter Zijlstraae154be2009-09-10 14:40:57 +02001515static unsigned long power_of(int cpu)
1516{
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001517 return cpu_rq(cpu)->cpu_power;
Peter Zijlstraae154be2009-09-10 14:40:57 +02001518}
1519
Gregory Haskinse7693a32008-01-25 21:08:09 +01001520static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001521
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001522static unsigned long cpu_avg_load_per_task(int cpu)
1523{
1524 struct rq *rq = cpu_rq(cpu);
Ingo Molnaraf6d5962008-11-29 20:45:15 +01001525 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001526
Steven Rostedt4cd42622008-11-26 21:04:24 -05001527 if (nr_running)
1528 rq->avg_load_per_task = rq->load.weight / nr_running;
Balbir Singha2d47772008-11-12 16:19:00 +05301529 else
1530 rq->avg_load_per_task = 0;
Peter Zijlstraa8a51d52008-06-27 13:41:26 +02001531
1532 return rq->avg_load_per_task;
1533}
1534
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001535#ifdef CONFIG_FAIR_GROUP_SCHED
1536
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001537/*
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001538 * Compute the cpu's hierarchical load factor for each task group.
1539 * This needs to be done in a top-down fashion because the load of a child
1540 * group is a fraction of its parents load.
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001541 */
Peter Zijlstraeb755802008-08-19 12:33:05 +02001542static int tg_load_down(struct task_group *tg, void *data)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001543{
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001544 unsigned long load;
Peter Zijlstraeb755802008-08-19 12:33:05 +02001545 long cpu = (long)data;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001546
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001547 if (!tg->parent) {
1548 load = cpu_rq(cpu)->load.weight;
1549 } else {
1550 load = tg->parent->cfs_rq[cpu]->h_load;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001551 load *= tg->se[cpu]->load.weight;
Peter Zijlstrac8cba852008-06-27 13:41:23 +02001552 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1553 }
1554
1555 tg->cfs_rq[cpu]->h_load = load;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001556
Peter Zijlstraeb755802008-08-19 12:33:05 +02001557 return 0;
Peter Zijlstra4d8d5952008-06-27 13:41:19 +02001558}
1559
Peter Zijlstraeb755802008-08-19 12:33:05 +02001560static void update_h_load(long cpu)
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001561{
Peter Zijlstraeb755802008-08-19 12:33:05 +02001562 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001563}
1564
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001565#endif
1566
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001567#ifdef CONFIG_PREEMPT
1568
Peter Zijlstrab78bb862009-09-15 14:23:18 +02001569static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1570
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001571/*
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001572 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1573 * way at the expense of forcing extra atomic operations in all
1574 * invocations. This assures that the double_lock is acquired using the
1575 * same underlying policy as the spinlock_t on this architecture, which
1576 * reduces latency compared to the unfair variant below. However, it
1577 * also adds more overhead and therefore may reduce throughput.
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001578 */
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001579static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1580 __releases(this_rq->lock)
1581 __acquires(busiest->lock)
1582 __acquires(this_rq->lock)
1583{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001584 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001585 double_rq_lock(this_rq, busiest);
1586
1587 return 1;
1588}
1589
1590#else
1591/*
1592 * Unfair double_lock_balance: Optimizes throughput at the expense of
1593 * latency by eliminating extra atomic operations when the locks are
1594 * already in proper order on entry. This favors lower cpu-ids and will
1595 * grant the double lock to lower cpus over higher ids under contention,
1596 * regardless of entry order into the function.
1597 */
1598static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001599 __releases(this_rq->lock)
1600 __acquires(busiest->lock)
1601 __acquires(this_rq->lock)
1602{
1603 int ret = 0;
1604
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001605 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001606 if (busiest < this_rq) {
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001607 raw_spin_unlock(&this_rq->lock);
1608 raw_spin_lock(&busiest->lock);
1609 raw_spin_lock_nested(&this_rq->lock,
1610 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001611 ret = 1;
1612 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001613 raw_spin_lock_nested(&busiest->lock,
1614 SINGLE_DEPTH_NESTING);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001615 }
1616 return ret;
1617}
1618
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001619#endif /* CONFIG_PREEMPT */
1620
1621/*
1622 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1623 */
1624static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1625{
1626 if (unlikely(!irqs_disabled())) {
1627 /* printk() doesn't work good under rq->lock */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001628 raw_spin_unlock(&this_rq->lock);
Gregory Haskins8f45e2b2008-12-29 09:39:51 -05001629 BUG_ON(1);
1630 }
1631
1632 return _double_lock_balance(this_rq, busiest);
1633}
1634
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001635static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1636 __releases(busiest->lock)
1637{
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001638 raw_spin_unlock(&busiest->lock);
Alexey Dobriyan70574a92008-11-28 22:08:00 +03001639 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1640}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001641
1642/*
1643 * double_rq_lock - safely lock two runqueues
1644 *
1645 * Note this does not disable interrupts like task_rq_lock,
1646 * you need to do so manually before calling.
1647 */
1648static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1649 __acquires(rq1->lock)
1650 __acquires(rq2->lock)
1651{
1652 BUG_ON(!irqs_disabled());
1653 if (rq1 == rq2) {
1654 raw_spin_lock(&rq1->lock);
1655 __acquire(rq2->lock); /* Fake it out ;) */
1656 } else {
1657 if (rq1 < rq2) {
1658 raw_spin_lock(&rq1->lock);
1659 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1660 } else {
1661 raw_spin_lock(&rq2->lock);
1662 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1663 }
1664 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001665}
1666
1667/*
1668 * double_rq_unlock - safely unlock two runqueues
1669 *
1670 * Note this does not restore interrupts like task_rq_unlock,
1671 * you need to do so manually after calling.
1672 */
1673static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1674 __releases(rq1->lock)
1675 __releases(rq2->lock)
1676{
1677 raw_spin_unlock(&rq1->lock);
1678 if (rq1 != rq2)
1679 raw_spin_unlock(&rq2->lock);
1680 else
1681 __release(rq2->lock);
1682}
1683
Mike Galbraithd95f4122011-02-01 09:50:51 -05001684#else /* CONFIG_SMP */
1685
1686/*
1687 * double_rq_lock - safely lock two runqueues
1688 *
1689 * Note this does not disable interrupts like task_rq_lock,
1690 * you need to do so manually before calling.
1691 */
1692static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1693 __acquires(rq1->lock)
1694 __acquires(rq2->lock)
1695{
1696 BUG_ON(!irqs_disabled());
1697 BUG_ON(rq1 != rq2);
1698 raw_spin_lock(&rq1->lock);
1699 __acquire(rq2->lock); /* Fake it out ;) */
1700}
1701
1702/*
1703 * double_rq_unlock - safely unlock two runqueues
1704 *
1705 * Note this does not restore interrupts like task_rq_unlock,
1706 * you need to do so manually after calling.
1707 */
1708static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1709 __releases(rq1->lock)
1710 __releases(rq2->lock)
1711{
1712 BUG_ON(rq1 != rq2);
1713 raw_spin_unlock(&rq1->lock);
1714 __release(rq2->lock);
1715}
1716
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001717#endif
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001718
Peter Zijlstra74f51872010-04-22 21:50:19 +02001719static void calc_load_account_idle(struct rq *this_rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01001720static void update_sysctl(void);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01001721static int get_update_sysctl_factor(void);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07001722static void update_cpu_load(struct rq *this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02001723
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001724static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1725{
1726 set_task_rq(p, cpu);
1727#ifdef CONFIG_SMP
1728 /*
1729 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1730 * successfuly executed on another CPU. We must ensure that updates of
1731 * per-task data have been completed by this moment.
1732 */
1733 smp_wmb();
1734 task_thread_info(p)->cpu = cpu;
1735#endif
1736}
Peter Zijlstra18d95a22008-04-19 19:45:00 +02001737
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001738static const struct sched_class rt_sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02001739
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001740#define sched_class_highest (&stop_sched_class)
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001741#define for_each_class(class) \
1742 for (class = sched_class_highest; class; class = class->next)
Ingo Molnardd41f592007-07-09 18:51:59 +02001743
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001744#include "sched_stats.h"
1745
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001746static void inc_nr_running(struct rq *rq)
Ingo Molnar6363ca52008-05-29 11:28:57 +02001747{
1748 rq->nr_running++;
Ingo Molnar6363ca52008-05-29 11:28:57 +02001749}
1750
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001751static void dec_nr_running(struct rq *rq)
Ingo Molnar9c217242007-08-02 17:41:40 +02001752{
1753 rq->nr_running--;
Ingo Molnar9c217242007-08-02 17:41:40 +02001754}
1755
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001756static void set_load_weight(struct task_struct *p)
1757{
Ingo Molnardd41f592007-07-09 18:51:59 +02001758 /*
1759 * SCHED_IDLE tasks get minimal weight:
1760 */
1761 if (p->policy == SCHED_IDLE) {
1762 p->se.load.weight = WEIGHT_IDLEPRIO;
1763 p->se.load.inv_weight = WMULT_IDLEPRIO;
1764 return;
1765 }
1766
1767 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1768 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
Ingo Molnar45bf76d2007-07-09 18:51:59 +02001769}
1770
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001771static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
Gregory Haskins2087a1a2008-06-27 14:30:00 -06001772{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001773 update_rq_clock(rq);
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001774 sched_info_queued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001775 p->sched_class->enqueue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001776 p->se.on_rq = 1;
1777}
1778
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001779static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnardd41f592007-07-09 18:51:59 +02001780{
Mike Galbraitha64692a2010-03-11 17:16:20 +01001781 update_rq_clock(rq);
Ankita Garg46ac22b2008-07-01 14:30:06 +05301782 sched_info_dequeued(p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001783 p->sched_class->dequeue_task(rq, p, flags);
Ingo Molnardd41f592007-07-09 18:51:59 +02001784 p->se.on_rq = 0;
Ingo Molnar71f8bd42007-07-09 18:51:59 +02001785}
1786
1787/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001788 * activate_task - move a task to the runqueue.
1789 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001790static void activate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001791{
1792 if (task_contributes_to_load(p))
1793 rq->nr_uninterruptible--;
1794
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001795 enqueue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001796 inc_nr_running(rq);
1797}
1798
1799/*
1800 * deactivate_task - remove a task from the runqueue.
1801 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001802static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001803{
1804 if (task_contributes_to_load(p))
1805 rq->nr_uninterruptible++;
1806
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001807 dequeue_task(rq, p, flags);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001808 dec_nr_running(rq);
1809}
1810
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001811#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1812
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001813/*
1814 * There are no locks covering percpu hardirq/softirq time.
1815 * They are only modified in account_system_vtime, on corresponding CPU
1816 * with interrupts disabled. So, writes are safe.
1817 * They are read and saved off onto struct rq in update_rq_clock().
1818 * This may result in other CPU reading this CPU's irq time and can
1819 * race with irq/account_system_vtime on this CPU. We would either get old
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001820 * or new value with a side effect of accounting a slice of irq time to wrong
1821 * task when irq is in progress while we read rq->clock. That is a worthy
1822 * compromise in place of having locks on each irq in account_system_time.
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001823 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001824static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1825static DEFINE_PER_CPU(u64, cpu_softirq_time);
1826
1827static DEFINE_PER_CPU(u64, irq_start_time);
1828static int sched_clock_irqtime;
1829
1830void enable_sched_clock_irqtime(void)
1831{
1832 sched_clock_irqtime = 1;
1833}
1834
1835void disable_sched_clock_irqtime(void)
1836{
1837 sched_clock_irqtime = 0;
1838}
1839
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001840#ifndef CONFIG_64BIT
1841static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1842
1843static inline void irq_time_write_begin(void)
1844{
1845 __this_cpu_inc(irq_time_seq.sequence);
1846 smp_wmb();
1847}
1848
1849static inline void irq_time_write_end(void)
1850{
1851 smp_wmb();
1852 __this_cpu_inc(irq_time_seq.sequence);
1853}
1854
1855static inline u64 irq_time_read(int cpu)
1856{
1857 u64 irq_time;
1858 unsigned seq;
1859
1860 do {
1861 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1862 irq_time = per_cpu(cpu_softirq_time, cpu) +
1863 per_cpu(cpu_hardirq_time, cpu);
1864 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1865
1866 return irq_time;
1867}
1868#else /* CONFIG_64BIT */
1869static inline void irq_time_write_begin(void)
1870{
1871}
1872
1873static inline void irq_time_write_end(void)
1874{
1875}
1876
1877static inline u64 irq_time_read(int cpu)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001878{
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001879 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1880}
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001881#endif /* CONFIG_64BIT */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001882
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001883/*
1884 * Called before incrementing preempt_count on {soft,}irq_enter
1885 * and before decrementing preempt_count on {soft,}irq_exit.
1886 */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001887void account_system_vtime(struct task_struct *curr)
1888{
1889 unsigned long flags;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001890 s64 delta;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001891 int cpu;
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001892
1893 if (!sched_clock_irqtime)
1894 return;
1895
1896 local_irq_save(flags);
1897
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001898 cpu = smp_processor_id();
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001899 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1900 __this_cpu_add(irq_start_time, delta);
1901
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001902 irq_time_write_begin();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001903 /*
1904 * We do not account for softirq time from ksoftirqd here.
1905 * We want to continue accounting softirq time to ksoftirqd thread
1906 * in that case, so as not to confuse scheduler with a special task
1907 * that do not consume any time, but still wants to run.
1908 */
1909 if (hardirq_count())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001910 __this_cpu_add(cpu_hardirq_time, delta);
Venkatesh Pallipadi4dd53d82010-12-21 17:09:00 -08001911 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001912 __this_cpu_add(cpu_softirq_time, delta);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001913
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001914 irq_time_write_end();
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001915 local_irq_restore(flags);
1916}
Ingo Molnarb7dadc32010-10-18 20:00:37 +02001917EXPORT_SYMBOL_GPL(account_system_vtime);
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001918
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001919static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001920{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001921 s64 irq_delta;
1922
Peter Zijlstra8e92c202010-12-09 14:15:34 +01001923 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001924
1925 /*
1926 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1927 * this case when a previous update_rq_clock() happened inside a
1928 * {soft,}irq region.
1929 *
1930 * When this happens, we stop ->clock_task and only update the
1931 * prev_irq_time stamp to account for the part that fit, so that a next
1932 * update will consume the rest. This ensures ->clock_task is
1933 * monotonic.
1934 *
1935 * It does however cause some slight miss-attribution of {soft,}irq
1936 * time, a more accurate solution would be to update the irq_time using
1937 * the current rq->clock timestamp, except that would require using
1938 * atomic ops.
1939 */
1940 if (irq_delta > delta)
1941 irq_delta = delta;
1942
1943 rq->prev_irq_time += irq_delta;
1944 delta -= irq_delta;
1945 rq->clock_task += delta;
1946
1947 if (irq_delta && sched_feat(NONIRQ_POWER))
1948 sched_rt_avg_update(rq, irq_delta);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07001949}
1950
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001951static int irqtime_account_hi_update(void)
1952{
1953 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1954 unsigned long flags;
1955 u64 latest_ns;
1956 int ret = 0;
1957
1958 local_irq_save(flags);
1959 latest_ns = this_cpu_read(cpu_hardirq_time);
1960 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
1961 ret = 1;
1962 local_irq_restore(flags);
1963 return ret;
1964}
1965
1966static int irqtime_account_si_update(void)
1967{
1968 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1969 unsigned long flags;
1970 u64 latest_ns;
1971 int ret = 0;
1972
1973 local_irq_save(flags);
1974 latest_ns = this_cpu_read(cpu_softirq_time);
1975 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
1976 ret = 1;
1977 local_irq_restore(flags);
1978 return ret;
1979}
1980
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001981#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001982
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08001983#define sched_clock_irqtime (0)
1984
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001985static void update_rq_clock_task(struct rq *rq, s64 delta)
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001986{
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001987 rq->clock_task += delta;
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001988}
1989
Peter Zijlstrafe44d622010-12-09 14:15:34 +01001990#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadib52bfee2010-10-04 17:03:19 -07001991
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001992#include "sched_idletask.c"
1993#include "sched_fair.c"
1994#include "sched_rt.c"
Mike Galbraith5091faa2010-11-30 14:18:03 +01001995#include "sched_autogroup.c"
Peter Zijlstra34f971f2010-09-22 13:53:15 +02001996#include "sched_stoptask.c"
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001997#ifdef CONFIG_SCHED_DEBUG
1998# include "sched_debug.c"
1999#endif
2000
Peter Zijlstra34f971f2010-09-22 13:53:15 +02002001void sched_set_stop_task(int cpu, struct task_struct *stop)
2002{
2003 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2004 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2005
2006 if (stop) {
2007 /*
2008 * Make it appear like a SCHED_FIFO task, its something
2009 * userspace knows about and won't get confused about.
2010 *
2011 * Also, it will make PI more or less work without too
2012 * much confusion -- but then, stop work should not
2013 * rely on PI working anyway.
2014 */
2015 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2016
2017 stop->sched_class = &stop_sched_class;
2018 }
2019
2020 cpu_rq(cpu)->stop = stop;
2021
2022 if (old_stop) {
2023 /*
2024 * Reset it back to a normal scheduling class so that
2025 * it can die in pieces.
2026 */
2027 old_stop->sched_class = &rt_sched_class;
2028 }
2029}
2030
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002031/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002032 * __normal_prio - return the priority that is based on the static prio
Ingo Molnar71f8bd42007-07-09 18:51:59 +02002033 */
Ingo Molnar14531182007-07-09 18:51:59 +02002034static inline int __normal_prio(struct task_struct *p)
2035{
Ingo Molnardd41f592007-07-09 18:51:59 +02002036 return p->static_prio;
Ingo Molnar14531182007-07-09 18:51:59 +02002037}
2038
2039/*
Ingo Molnarb29739f2006-06-27 02:54:51 -07002040 * Calculate the expected normal priority: i.e. priority
2041 * without taking RT-inheritance into account. Might be
2042 * boosted by interactivity modifiers. Changes upon fork,
2043 * setprio syscalls, and whenever the interactivity
2044 * estimator recalculates.
2045 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002046static inline int normal_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002047{
2048 int prio;
2049
Ingo Molnare05606d2007-07-09 18:51:59 +02002050 if (task_has_rt_policy(p))
Ingo Molnarb29739f2006-06-27 02:54:51 -07002051 prio = MAX_RT_PRIO-1 - p->rt_priority;
2052 else
2053 prio = __normal_prio(p);
2054 return prio;
2055}
2056
2057/*
2058 * Calculate the current priority, i.e. the priority
2059 * taken into account by the scheduler. This value might
2060 * be boosted by RT tasks, or might be boosted by
2061 * interactivity modifiers. Will be RT if the task got
2062 * RT-boosted. If not then it returns p->normal_prio.
2063 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002064static int effective_prio(struct task_struct *p)
Ingo Molnarb29739f2006-06-27 02:54:51 -07002065{
2066 p->normal_prio = normal_prio(p);
2067 /*
2068 * If we are RT tasks or we were boosted to RT priority,
2069 * keep the priority unchanged. Otherwise, update priority
2070 * to the normal priority:
2071 */
2072 if (!rt_prio(p->prio))
2073 return p->normal_prio;
2074 return p->prio;
2075}
2076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077/**
2078 * task_curr - is this task currently executing on a CPU?
2079 * @p: the task in question.
2080 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002081inline int task_curr(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082{
2083 return cpu_curr(task_cpu(p)) == p;
2084}
2085
Steven Rostedtcb469842008-01-25 21:08:22 +01002086static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2087 const struct sched_class *prev_class,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002088 int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01002089{
2090 if (prev_class != p->sched_class) {
2091 if (prev_class->switched_from)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002092 prev_class->switched_from(rq, p);
2093 p->sched_class->switched_to(rq, p);
2094 } else if (oldprio != p->prio)
2095 p->sched_class->prio_changed(rq, p, oldprio);
Steven Rostedtcb469842008-01-25 21:08:22 +01002096}
2097
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002098static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2099{
2100 const struct sched_class *class;
2101
2102 if (p->sched_class == rq->curr->sched_class) {
2103 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2104 } else {
2105 for_each_class(class) {
2106 if (class == rq->curr->sched_class)
2107 break;
2108 if (class == p->sched_class) {
2109 resched_task(rq->curr);
2110 break;
2111 }
2112 }
2113 }
2114
2115 /*
2116 * A queue event has occurred, and we're going to schedule. In
2117 * this case, we can save a useless back to back clock update.
2118 */
Mike Galbraithf26f9af2010-12-08 11:05:42 +01002119 if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
Peter Zijlstra1e5a7402010-10-31 12:37:04 +01002120 rq->skip_clock_update = 1;
2121}
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123#ifdef CONFIG_SMP
Ingo Molnarcc367732007-10-15 17:00:18 +02002124/*
2125 * Is this task likely cache-hot:
2126 */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002127static int
Ingo Molnarcc367732007-10-15 17:00:18 +02002128task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2129{
2130 s64 delta;
2131
Peter Zijlstrae6c8fba2009-12-16 18:04:33 +01002132 if (p->sched_class != &fair_sched_class)
2133 return 0;
2134
Nikhil Raoef8002f2010-10-13 12:09:35 -07002135 if (unlikely(p->policy == SCHED_IDLE))
2136 return 0;
2137
Ingo Molnarf540a602008-03-15 17:10:34 +01002138 /*
2139 * Buddy candidates are cache hot:
2140 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002141 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
Peter Zijlstra47932412008-11-04 21:25:09 +01002142 (&p->se == cfs_rq_of(&p->se)->next ||
2143 &p->se == cfs_rq_of(&p->se)->last))
Ingo Molnarf540a602008-03-15 17:10:34 +01002144 return 1;
2145
Ingo Molnar6bc16652007-10-15 17:00:18 +02002146 if (sysctl_sched_migration_cost == -1)
2147 return 1;
2148 if (sysctl_sched_migration_cost == 0)
2149 return 0;
2150
Ingo Molnarcc367732007-10-15 17:00:18 +02002151 delta = now - p->se.exec_start;
2152
2153 return delta < (s64)sysctl_sched_migration_cost;
2154}
2155
Ingo Molnardd41f592007-07-09 18:51:59 +02002156void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
Ingo Molnarc65cc872007-07-09 18:51:58 +02002157{
Peter Zijlstrae2912002009-12-16 18:04:36 +01002158#ifdef CONFIG_SCHED_DEBUG
2159 /*
2160 * We should never call set_task_cpu() on a blocked task,
2161 * ttwu() will sort out the placement.
2162 */
Peter Zijlstra077614e2009-12-17 13:16:31 +01002163 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2164 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
Peter Zijlstrae2912002009-12-16 18:04:36 +01002165#endif
2166
Mathieu Desnoyersde1d7282009-05-05 16:49:59 +08002167 trace_sched_migrate_task(p, new_cpu);
Peter Zijlstracbc34ed2008-12-10 08:08:22 +01002168
Peter Zijlstra0c697742009-12-22 15:43:19 +01002169 if (task_cpu(p) != new_cpu) {
2170 p->se.nr_migrations++;
2171 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2172 }
Ingo Molnardd41f592007-07-09 18:51:59 +02002173
2174 __set_task_cpu(p, new_cpu);
Ingo Molnarc65cc872007-07-09 18:51:58 +02002175}
2176
Tejun Heo969c7922010-05-06 18:49:21 +02002177struct migration_arg {
Ingo Molnar36c8b582006-07-03 00:25:41 -07002178 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 int dest_cpu;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002180};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
Tejun Heo969c7922010-05-06 18:49:21 +02002182static int migration_cpu_stop(void *data);
2183
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184/*
2185 * The task's runqueue lock must be held.
2186 * Returns true if you have to wait for migration thread.
2187 */
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05302188static bool migrate_task(struct task_struct *p, struct rq *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 /*
2191 * If the task is not on a runqueue (and not running), then
Peter Zijlstrae2912002009-12-16 18:04:36 +01002192 * the next wake-up will properly place the task.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 */
Tejun Heo969c7922010-05-06 18:49:21 +02002194 return p->se.on_rq || task_running(rq, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195}
2196
2197/*
2198 * wait_task_inactive - wait for a thread to unschedule.
2199 *
Roland McGrath85ba2d82008-07-25 19:45:58 -07002200 * If @match_state is nonzero, it's the @p->state value just checked and
2201 * not expected to change. If it changes, i.e. @p might have woken up,
2202 * then return zero. When we succeed in waiting for @p to be off its CPU,
2203 * we return a positive number (its total switch count). If a second call
2204 * a short while later returns the same number, the caller can be sure that
2205 * @p has remained unscheduled the whole time.
2206 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 * The caller must ensure that the task *will* unschedule sometime soon,
2208 * else this function might spin for a *long* time. This function can't
2209 * be called with interrupts off, or it may introduce deadlock with
2210 * smp_call_function() if an IPI is sent by the same process we are
2211 * waiting to become inactive.
2212 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002213unsigned long wait_task_inactive(struct task_struct *p, long match_state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214{
2215 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002216 int running, on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002217 unsigned long ncsw;
Ingo Molnar70b97a72006-07-03 00:25:42 -07002218 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
Andi Kleen3a5c3592007-10-15 17:00:14 +02002220 for (;;) {
2221 /*
2222 * We do the initial early heuristics without holding
2223 * any task-queue locks at all. We'll only try to get
2224 * the runqueue lock when things look like they will
2225 * work out!
2226 */
2227 rq = task_rq(p);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002228
Andi Kleen3a5c3592007-10-15 17:00:14 +02002229 /*
2230 * If the task is actively running on another CPU
2231 * still, just relax and busy-wait without holding
2232 * any locks.
2233 *
2234 * NOTE! Since we don't hold any locks, it's not
2235 * even sure that "rq" stays as the right runqueue!
2236 * But we don't care, since "task_running()" will
2237 * return false if the runqueue has changed and p
2238 * is actually now running somewhere else!
2239 */
Roland McGrath85ba2d82008-07-25 19:45:58 -07002240 while (task_running(rq, p)) {
2241 if (match_state && unlikely(p->state != match_state))
2242 return 0;
Andi Kleen3a5c3592007-10-15 17:00:14 +02002243 cpu_relax();
Roland McGrath85ba2d82008-07-25 19:45:58 -07002244 }
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002245
Andi Kleen3a5c3592007-10-15 17:00:14 +02002246 /*
2247 * Ok, time to look more closely! We need the rq
2248 * lock now, to be *sure*. If we're wrong, we'll
2249 * just go back and repeat.
2250 */
2251 rq = task_rq_lock(p, &flags);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002252 trace_sched_wait_task(p);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002253 running = task_running(rq, p);
2254 on_rq = p->se.on_rq;
Roland McGrath85ba2d82008-07-25 19:45:58 -07002255 ncsw = 0;
Oleg Nesterovf31e11d2008-08-20 16:54:44 -07002256 if (!match_state || p->state == match_state)
Oleg Nesterov93dcf552008-08-20 16:54:44 -07002257 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
Andi Kleen3a5c3592007-10-15 17:00:14 +02002258 task_rq_unlock(rq, &flags);
Linus Torvaldsfa490cf2007-06-18 09:34:40 -07002259
Andi Kleen3a5c3592007-10-15 17:00:14 +02002260 /*
Roland McGrath85ba2d82008-07-25 19:45:58 -07002261 * If it changed from the expected state, bail out now.
2262 */
2263 if (unlikely(!ncsw))
2264 break;
2265
2266 /*
Andi Kleen3a5c3592007-10-15 17:00:14 +02002267 * Was it really running after all now that we
2268 * checked with the proper locks actually held?
2269 *
2270 * Oops. Go back and try again..
2271 */
2272 if (unlikely(running)) {
2273 cpu_relax();
2274 continue;
2275 }
2276
2277 /*
2278 * It's not enough that it's not actively running,
2279 * it must be off the runqueue _entirely_, and not
2280 * preempted!
2281 *
Luis Henriques80dd99b2009-03-16 19:58:09 +00002282 * So if it was still runnable (but just not actively
Andi Kleen3a5c3592007-10-15 17:00:14 +02002283 * running right now), it's preempted, and we should
2284 * yield - it could be a while.
2285 */
2286 if (unlikely(on_rq)) {
Thomas Gleixner8eb90c32011-02-23 23:52:21 +00002287 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2288
2289 set_current_state(TASK_UNINTERRUPTIBLE);
2290 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
Andi Kleen3a5c3592007-10-15 17:00:14 +02002291 continue;
2292 }
2293
2294 /*
2295 * Ahh, all good. It wasn't running, and it wasn't
2296 * runnable, which means that it will never become
2297 * running in the future either. We're all done!
2298 */
2299 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 }
Roland McGrath85ba2d82008-07-25 19:45:58 -07002301
2302 return ncsw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303}
2304
2305/***
2306 * kick_process - kick a running thread to enter/exit the kernel
2307 * @p: the to-be-kicked thread
2308 *
2309 * Cause a process which is running on another CPU to enter
2310 * kernel-mode, without any delay. (to get signals handled.)
2311 *
2312 * NOTE: this function doesnt have to take the runqueue lock,
2313 * because all it wants to ensure is that the remote task enters
2314 * the kernel. If the IPI races and the task has been migrated
2315 * to another CPU then no harm is done and the purpose has been
2316 * achieved as well.
2317 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002318void kick_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
2320 int cpu;
2321
2322 preempt_disable();
2323 cpu = task_cpu(p);
2324 if ((cpu != smp_processor_id()) && task_curr(p))
2325 smp_send_reschedule(cpu);
2326 preempt_enable();
2327}
Rusty Russellb43e3522009-06-12 22:27:00 -06002328EXPORT_SYMBOL_GPL(kick_process);
Nick Piggin476d1392005-06-25 14:57:29 -07002329#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002331#ifdef CONFIG_SMP
Oleg Nesterov30da6882010-03-15 10:10:19 +01002332/*
2333 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2334 */
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002335static int select_fallback_rq(int cpu, struct task_struct *p)
2336{
2337 int dest_cpu;
2338 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2339
2340 /* Look for allowed, online CPU in same node. */
2341 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2342 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2343 return dest_cpu;
2344
2345 /* Any allowed, online CPU? */
2346 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2347 if (dest_cpu < nr_cpu_ids)
2348 return dest_cpu;
2349
2350 /* No more Mr. Nice Guy. */
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01002351 dest_cpu = cpuset_cpus_allowed_fallback(p);
2352 /*
2353 * Don't tell them about moving exiting tasks or
2354 * kernel threads (both mm NULL), since they never
2355 * leave kernel.
2356 */
2357 if (p->mm && printk_ratelimit()) {
2358 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2359 task_pid_nr(p), p->comm, cpu);
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002360 }
2361
2362 return dest_cpu;
2363}
2364
Peter Zijlstrae2912002009-12-16 18:04:36 +01002365/*
Oleg Nesterov30da6882010-03-15 10:10:19 +01002366 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
Peter Zijlstrae2912002009-12-16 18:04:36 +01002367 */
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002368static inline
Peter Zijlstra0017d732010-03-24 18:34:10 +01002369int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002370{
Peter Zijlstra0017d732010-03-24 18:34:10 +01002371 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002372
2373 /*
2374 * In order not to call set_task_cpu() on a blocking task we need
2375 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2376 * cpu.
2377 *
2378 * Since this is common to all placement strategies, this lives here.
2379 *
2380 * [ this allows ->select_task() to simply return task_cpu(p) and
2381 * not worry about this generic constraint ]
2382 */
2383 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
Peter Zijlstra70f11202009-12-20 17:36:27 +01002384 !cpu_online(cpu)))
Peter Zijlstra5da9a0f2009-12-16 18:04:38 +01002385 cpu = select_fallback_rq(task_cpu(p), p);
Peter Zijlstrae2912002009-12-16 18:04:36 +01002386
2387 return cpu;
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002388}
Mike Galbraith09a40af2010-04-15 07:29:59 +02002389
2390static void update_avg(u64 *avg, u64 sample)
2391{
2392 s64 diff = sample - *avg;
2393 *avg += diff >> 3;
2394}
Peter Zijlstra970b13b2009-11-25 13:31:39 +01002395#endif
2396
Tejun Heo9ed38112009-12-03 15:08:03 +09002397static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
2398 bool is_sync, bool is_migrate, bool is_local,
2399 unsigned long en_flags)
2400{
2401 schedstat_inc(p, se.statistics.nr_wakeups);
2402 if (is_sync)
2403 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2404 if (is_migrate)
2405 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2406 if (is_local)
2407 schedstat_inc(p, se.statistics.nr_wakeups_local);
2408 else
2409 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2410
2411 activate_task(rq, p, en_flags);
2412}
2413
2414static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2415 int wake_flags, bool success)
2416{
2417 trace_sched_wakeup(p, success);
2418 check_preempt_curr(rq, p, wake_flags);
2419
2420 p->state = TASK_RUNNING;
2421#ifdef CONFIG_SMP
2422 if (p->sched_class->task_woken)
2423 p->sched_class->task_woken(rq, p);
2424
2425 if (unlikely(rq->idle_stamp)) {
2426 u64 delta = rq->clock - rq->idle_stamp;
2427 u64 max = 2*sysctl_sched_migration_cost;
2428
2429 if (delta > max)
2430 rq->avg_idle = max;
2431 else
2432 update_avg(&rq->avg_idle, delta);
2433 rq->idle_stamp = 0;
2434 }
2435#endif
Tejun Heo21aa9af2010-06-08 21:40:37 +02002436 /* if a worker is waking up, notify workqueue */
2437 if ((p->flags & PF_WQ_WORKER) && success)
2438 wq_worker_waking_up(p, cpu_of(rq));
Tejun Heo9ed38112009-12-03 15:08:03 +09002439}
2440
2441/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09002443 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09002445 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 *
2447 * Put it on the run-queue if it's not already there. The "current"
2448 * thread is always on the run-queue (except when the actual
2449 * re-schedule is in progress), and as such you're allowed to do
2450 * the simpler "current->state = TASK_RUNNING" to mark yourself
2451 * runnable without the overhead of this.
2452 *
Tejun Heo9ed38112009-12-03 15:08:03 +09002453 * Returns %true if @p was woken up, %false if it was already running
2454 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 */
Peter Zijlstra7d478722009-09-14 19:55:44 +02002456static int try_to_wake_up(struct task_struct *p, unsigned int state,
2457 int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458{
Ingo Molnarcc367732007-10-15 17:00:18 +02002459 int cpu, orig_cpu, this_cpu, success = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 unsigned long flags;
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002461 unsigned long en_flags = ENQUEUE_WAKEUP;
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002462 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002464 this_cpu = get_cpu();
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02002465
Linus Torvalds04e2f172008-02-23 18:05:03 -08002466 smp_wmb();
Dan Carpenterab3b3aa2010-03-06 14:17:52 +03002467 rq = task_rq_lock(p, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002468 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 goto out;
2470
Ingo Molnardd41f592007-07-09 18:51:59 +02002471 if (p->se.on_rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 goto out_running;
2473
2474 cpu = task_cpu(p);
Ingo Molnarcc367732007-10-15 17:00:18 +02002475 orig_cpu = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
2477#ifdef CONFIG_SMP
2478 if (unlikely(task_running(rq, p)))
2479 goto out_activate;
2480
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002481 /*
2482 * In order to handle concurrent wakeups and release the rq->lock
2483 * we put the task in TASK_WAKING state.
Ingo Molnareb240732009-09-16 21:09:13 +02002484 *
2485 * First fix up the nr_uninterruptible count:
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002486 */
Peter Zijlstracc87f762010-03-26 12:22:14 +01002487 if (task_contributes_to_load(p)) {
2488 if (likely(cpu_online(orig_cpu)))
2489 rq->nr_uninterruptible--;
2490 else
2491 this_rq()->nr_uninterruptible--;
2492 }
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002493 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002494
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002495 if (p->sched_class->task_waking) {
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002496 p->sched_class->task_waking(rq, p);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002497 en_flags |= ENQUEUE_WAKING;
Peter Zijlstra0970d292010-02-15 14:45:54 +01002498 }
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002499
Peter Zijlstra0017d732010-03-24 18:34:10 +01002500 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2501 if (cpu != orig_cpu)
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002502 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002503 __task_rq_unlock(rq);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01002504
Peter Zijlstra0970d292010-02-15 14:45:54 +01002505 rq = cpu_rq(cpu);
2506 raw_spin_lock(&rq->lock);
Mike Galbraithf5dc3752009-10-09 08:35:03 +02002507
Peter Zijlstra0970d292010-02-15 14:45:54 +01002508 /*
2509 * We migrated the task without holding either rq->lock, however
2510 * since the task is not on the task list itself, nobody else
2511 * will try and migrate the task, hence the rq should match the
2512 * cpu we just moved it to.
2513 */
2514 WARN_ON(task_cpu(p) != cpu);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002515 WARN_ON(p->state != TASK_WAKING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
Gregory Haskinse7693a32008-01-25 21:08:09 +01002517#ifdef CONFIG_SCHEDSTATS
2518 schedstat_inc(rq, ttwu_count);
2519 if (cpu == this_cpu)
2520 schedstat_inc(rq, ttwu_local);
2521 else {
2522 struct sched_domain *sd;
2523 for_each_domain(this_cpu, sd) {
Rusty Russell758b2cd2008-11-25 02:35:04 +10302524 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Gregory Haskinse7693a32008-01-25 21:08:09 +01002525 schedstat_inc(sd, ttwu_wake_remote);
2526 break;
2527 }
2528 }
2529 }
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002530#endif /* CONFIG_SCHEDSTATS */
Gregory Haskinse7693a32008-01-25 21:08:09 +01002531
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532out_activate:
2533#endif /* CONFIG_SMP */
Tejun Heo9ed38112009-12-03 15:08:03 +09002534 ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
2535 cpu == this_cpu, en_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 success = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537out_running:
Tejun Heo9ed38112009-12-03 15:08:03 +09002538 ttwu_post_activation(p, rq, wake_flags, success);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539out:
2540 task_rq_unlock(rq, &flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02002541 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542
2543 return success;
2544}
2545
David Howells50fa6102009-04-28 15:01:38 +01002546/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02002547 * try_to_wake_up_local - try to wake up a local task with rq lock held
2548 * @p: the thread to be awakened
2549 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04002550 * Put @p on the run-queue if it's not already there. The caller must
Tejun Heo21aa9af2010-06-08 21:40:37 +02002551 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2552 * the current task. this_rq() stays locked over invocation.
2553 */
2554static void try_to_wake_up_local(struct task_struct *p)
2555{
2556 struct rq *rq = task_rq(p);
2557 bool success = false;
2558
2559 BUG_ON(rq != this_rq());
2560 BUG_ON(p == current);
2561 lockdep_assert_held(&rq->lock);
2562
2563 if (!(p->state & TASK_NORMAL))
2564 return;
2565
2566 if (!p->se.on_rq) {
2567 if (likely(!task_running(rq, p))) {
2568 schedstat_inc(rq, ttwu_count);
2569 schedstat_inc(rq, ttwu_local);
2570 }
2571 ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
2572 success = true;
2573 }
2574 ttwu_post_activation(p, rq, 0, success);
2575}
2576
2577/**
David Howells50fa6102009-04-28 15:01:38 +01002578 * wake_up_process - Wake up a specific process
2579 * @p: The process to be woken up.
2580 *
2581 * Attempt to wake up the nominated process and move it to the set of runnable
2582 * processes. Returns 1 if the process was woken up, 0 if it was already
2583 * running.
2584 *
2585 * It may be assumed that this function implies a write memory barrier before
2586 * changing the task state if and only if any tasks are woken up.
2587 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002588int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05002590 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592EXPORT_SYMBOL(wake_up_process);
2593
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002594int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595{
2596 return try_to_wake_up(p, state, 0);
2597}
2598
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599/*
2600 * Perform scheduler related setup for a newly forked process p.
2601 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02002602 *
2603 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002605static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606{
Ingo Molnardd41f592007-07-09 18:51:59 +02002607 p->se.exec_start = 0;
2608 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02002609 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01002610 p->se.nr_migrations = 0;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01002611 p->se.vruntime = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002612
2613#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03002614 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02002615#endif
Nick Piggin476d1392005-06-25 14:57:29 -07002616
Peter Zijlstrafa717062008-01-25 21:08:27 +01002617 INIT_LIST_HEAD(&p->rt.run_list);
Ingo Molnardd41f592007-07-09 18:51:59 +02002618 p->se.on_rq = 0;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02002619 INIT_LIST_HEAD(&p->se.group_node);
Nick Piggin476d1392005-06-25 14:57:29 -07002620
Avi Kivitye107be32007-07-26 13:40:43 +02002621#ifdef CONFIG_PREEMPT_NOTIFIERS
2622 INIT_HLIST_HEAD(&p->preempt_notifiers);
2623#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002624}
2625
2626/*
2627 * fork()/clone()-time setup:
2628 */
2629void sched_fork(struct task_struct *p, int clone_flags)
2630{
2631 int cpu = get_cpu();
2632
2633 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002634 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01002635 * We mark the process as running here. This guarantees that
Peter Zijlstra06b83b52009-12-16 18:04:35 +01002636 * nobody will actually run it, and a signal or other external
2637 * event cannot wake it up and insert it on the runqueue either.
2638 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002639 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02002640
Ingo Molnarb29739f2006-06-27 02:54:51 -07002641 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002642 * Revert to default priority/policy on fork if requested.
2643 */
2644 if (unlikely(p->sched_reset_on_fork)) {
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002645 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002646 p->policy = SCHED_NORMAL;
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002647 p->normal_prio = p->static_prio;
2648 }
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002649
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002650 if (PRIO_TO_NICE(p->static_prio) < 0) {
2651 p->static_prio = NICE_TO_PRIO(0);
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002652 p->normal_prio = p->static_prio;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02002653 set_load_weight(p);
2654 }
2655
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02002656 /*
2657 * We don't need the reset flag anymore after the fork. It has
2658 * fulfilled its duty:
2659 */
2660 p->sched_reset_on_fork = 0;
2661 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02002662
Peter Williamsf83f9ac2009-09-24 06:47:10 +00002663 /*
2664 * Make sure we do not leak PI boosting priority to the child.
2665 */
2666 p->prio = current->normal_prio;
2667
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02002668 if (!rt_prio(p->prio))
2669 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07002670
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002671 if (p->sched_class->task_fork)
2672 p->sched_class->task_fork(p);
2673
Peter Zijlstra86951592010-06-22 11:44:53 +02002674 /*
2675 * The child is not yet in the pid-hash so no cgroup attach races,
2676 * and the cgroup is pinned to this child due to cgroup_fork()
2677 * is ran before sched_fork().
2678 *
2679 * Silence PROVE_RCU.
2680 */
2681 rcu_read_lock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002682 set_task_cpu(p, cpu);
Peter Zijlstra86951592010-06-22 11:44:53 +02002683 rcu_read_unlock();
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02002684
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002685#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02002686 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07002687 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688#endif
Chen, Kenneth Wd6077cb2006-02-14 13:53:10 -08002689#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
Nick Piggin4866cde2005-06-25 14:57:23 -07002690 p->oncpu = 0;
2691#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692#ifdef CONFIG_PREEMPT
Nick Piggin4866cde2005-06-25 14:57:23 -07002693 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08002694 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01002696#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05002697 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01002698#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05002699
Nick Piggin476d1392005-06-25 14:57:29 -07002700 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701}
2702
2703/*
2704 * wake_up_new_task - wake up a newly created task for the first time.
2705 *
2706 * This function will do some initial scheduler statistics housekeeping
2707 * that must be done for every newly created context, then puts the task
2708 * on the runqueue and wakes it.
2709 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002710void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711{
2712 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02002713 struct rq *rq;
Andrew Mortonc8906922010-03-11 14:08:43 -08002714 int cpu __maybe_unused = get_cpu();
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002715
2716#ifdef CONFIG_SMP
Peter Zijlstra0017d732010-03-24 18:34:10 +01002717 rq = task_rq_lock(p, &flags);
2718 p->state = TASK_WAKING;
2719
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002720 /*
2721 * Fork balancing, do it here and not earlier because:
2722 * - cpus_allowed can change in the fork path
2723 * - any previously selected cpu might disappear through hotplug
2724 *
Peter Zijlstra0017d732010-03-24 18:34:10 +01002725 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2726 * without people poking at ->cpus_allowed.
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002727 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01002728 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002729 set_task_cpu(p, cpu);
Peter Zijlstra0017d732010-03-24 18:34:10 +01002730
2731 p->state = TASK_RUNNING;
2732 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002733#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
Peter Zijlstra0017d732010-03-24 18:34:10 +01002735 rq = task_rq_lock(p, &flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01002736 activate_task(rq, p, 0);
Peter Zijlstra27a9da62010-05-04 20:36:56 +02002737 trace_sched_wakeup_new(p, 1);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02002738 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002739#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01002740 if (p->sched_class->task_woken)
2741 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01002742#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02002743 task_rq_unlock(rq, &flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01002744 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745}
2746
Avi Kivitye107be32007-07-26 13:40:43 +02002747#ifdef CONFIG_PREEMPT_NOTIFIERS
2748
2749/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00002750 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07002751 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02002752 */
2753void preempt_notifier_register(struct preempt_notifier *notifier)
2754{
2755 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2756}
2757EXPORT_SYMBOL_GPL(preempt_notifier_register);
2758
2759/**
2760 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07002761 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02002762 *
2763 * This is safe to call from within a preemption notifier.
2764 */
2765void preempt_notifier_unregister(struct preempt_notifier *notifier)
2766{
2767 hlist_del(&notifier->link);
2768}
2769EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2770
2771static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2772{
2773 struct preempt_notifier *notifier;
2774 struct hlist_node *node;
2775
2776 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2777 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2778}
2779
2780static void
2781fire_sched_out_preempt_notifiers(struct task_struct *curr,
2782 struct task_struct *next)
2783{
2784 struct preempt_notifier *notifier;
2785 struct hlist_node *node;
2786
2787 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2788 notifier->ops->sched_out(notifier, next);
2789}
2790
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002791#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002792
2793static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2794{
2795}
2796
2797static void
2798fire_sched_out_preempt_notifiers(struct task_struct *curr,
2799 struct task_struct *next)
2800{
2801}
2802
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02002803#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02002804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805/**
Nick Piggin4866cde2005-06-25 14:57:23 -07002806 * prepare_task_switch - prepare to switch tasks
2807 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07002808 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07002809 * @next: the task we are going to switch to.
2810 *
2811 * This is called with the rq lock held and interrupts off. It must
2812 * be paired with a subsequent finish_task_switch after the context
2813 * switch.
2814 *
2815 * prepare_task_switch sets up locking and calls architecture specific
2816 * hooks.
2817 */
Avi Kivitye107be32007-07-26 13:40:43 +02002818static inline void
2819prepare_task_switch(struct rq *rq, struct task_struct *prev,
2820 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07002821{
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002822 sched_info_switch(prev, next);
2823 perf_event_task_sched_out(prev, next);
Avi Kivitye107be32007-07-26 13:40:43 +02002824 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002825 prepare_lock_switch(rq, next);
2826 prepare_arch_switch(next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002827 trace_sched_switch(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07002828}
2829
2830/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04002832 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 * @prev: the thread we just switched away from.
2834 *
Nick Piggin4866cde2005-06-25 14:57:23 -07002835 * finish_task_switch must be called after the context switch, paired
2836 * with a prepare_task_switch call before the context switch.
2837 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2838 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 *
2840 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01002841 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 * with the lock held can cause deadlocks; see schedule() for
2843 * details.)
2844 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02002845static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 __releases(rq->lock)
2847{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002849 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850
2851 rq->prev_mm = NULL;
2852
2853 /*
2854 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002855 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002856 * schedule one last time. The schedule call will never return, and
2857 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002858 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 * still held, otherwise prev could be scheduled on another cpu, die
2860 * there before we look at prev->state, and then the reference would
2861 * be dropped twice.
2862 * Manfred Spraul <manfred@colorfullife.com>
2863 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07002864 prev_state = prev->state;
Nick Piggin4866cde2005-06-25 14:57:23 -07002865 finish_arch_switch(prev);
Jamie Iles8381f652010-01-08 15:27:33 +00002866#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2867 local_irq_disable();
2868#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Peter Zijlstra49f47432009-12-27 11:51:52 +01002869 perf_event_task_sched_in(current);
Jamie Iles8381f652010-01-08 15:27:33 +00002870#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2871 local_irq_enable();
2872#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
Nick Piggin4866cde2005-06-25 14:57:23 -07002873 finish_lock_switch(rq, prev);
Steven Rostedte8fa1362008-01-25 21:08:05 +01002874
Avi Kivitye107be32007-07-26 13:40:43 +02002875 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 if (mm)
2877 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07002878 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08002879 /*
2880 * Remove function-return probe instances associated with this
2881 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02002882 */
bibo maoc6fd91f2006-03-26 01:38:20 -08002883 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08002885 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886}
2887
Gregory Haskins3f029d32009-07-29 11:08:47 -04002888#ifdef CONFIG_SMP
2889
2890/* assumes rq->lock is held */
2891static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2892{
2893 if (prev->sched_class->pre_schedule)
2894 prev->sched_class->pre_schedule(rq, prev);
2895}
2896
2897/* rq->lock is NOT held, but preemption is disabled */
2898static inline void post_schedule(struct rq *rq)
2899{
2900 if (rq->post_schedule) {
2901 unsigned long flags;
2902
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002903 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002904 if (rq->curr->sched_class->post_schedule)
2905 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01002906 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04002907
2908 rq->post_schedule = 0;
2909 }
2910}
2911
2912#else
2913
2914static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2915{
2916}
2917
2918static inline void post_schedule(struct rq *rq)
2919{
2920}
2921
2922#endif
2923
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924/**
2925 * schedule_tail - first thing a freshly forked thread must call.
2926 * @prev: the thread we just switched away from.
2927 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07002928asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 __releases(rq->lock)
2930{
Ingo Molnar70b97a72006-07-03 00:25:42 -07002931 struct rq *rq = this_rq();
2932
Nick Piggin4866cde2005-06-25 14:57:23 -07002933 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002934
Gregory Haskins3f029d32009-07-29 11:08:47 -04002935 /*
2936 * FIXME: do we need to worry about rq being invalidated by the
2937 * task_switch?
2938 */
2939 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04002940
Nick Piggin4866cde2005-06-25 14:57:23 -07002941#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2942 /* In this case, finish_task_switch does not reenable preemption */
2943 preempt_enable();
2944#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002946 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947}
2948
2949/*
2950 * context_switch - switch to the new MM and the new
2951 * thread's register state.
2952 */
Ingo Molnardd41f592007-07-09 18:51:59 +02002953static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07002954context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07002955 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956{
Ingo Molnardd41f592007-07-09 18:51:59 +02002957 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958
Avi Kivitye107be32007-07-26 13:40:43 +02002959 prepare_task_switch(rq, prev, next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002960
Ingo Molnardd41f592007-07-09 18:51:59 +02002961 mm = next->mm;
2962 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01002963 /*
2964 * For paravirt, this is coupled with an exit in switch_to to
2965 * combine the page table reload and the switch backend into
2966 * one hypercall.
2967 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08002968 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01002969
Heiko Carstens31915ab2010-09-16 14:42:25 +02002970 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 next->active_mm = oldmm;
2972 atomic_inc(&oldmm->mm_count);
2973 enter_lazy_tlb(oldmm, next);
2974 } else
2975 switch_mm(oldmm, mm, next);
2976
Heiko Carstens31915ab2010-09-16 14:42:25 +02002977 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 rq->prev_mm = oldmm;
2980 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002981 /*
2982 * Since the runqueue lock will be released by the next
2983 * task (which is an invalid locking op but in the case
2984 * of the scheduler it's an obvious special-case), so we
2985 * do an early lockdep release here:
2986 */
2987#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07002988 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07002989#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990
2991 /* Here we just switch the register state and the stack. */
2992 switch_to(prev, next, prev);
2993
Ingo Molnardd41f592007-07-09 18:51:59 +02002994 barrier();
2995 /*
2996 * this_rq must be evaluated again because prev may have moved
2997 * CPUs since it called schedule(), thus the 'rq' on its stack
2998 * frame will be invalid.
2999 */
3000 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001}
3002
3003/*
3004 * nr_running, nr_uninterruptible and nr_context_switches:
3005 *
3006 * externally visible scheduler statistics: current number of runnable
3007 * threads, current number of uninterruptible-sleeping threads, total
3008 * number of context switches performed since bootup.
3009 */
3010unsigned long nr_running(void)
3011{
3012 unsigned long i, sum = 0;
3013
3014 for_each_online_cpu(i)
3015 sum += cpu_rq(i)->nr_running;
3016
3017 return sum;
3018}
3019
3020unsigned long nr_uninterruptible(void)
3021{
3022 unsigned long i, sum = 0;
3023
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003024 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025 sum += cpu_rq(i)->nr_uninterruptible;
3026
3027 /*
3028 * Since we read the counters lockless, it might be slightly
3029 * inaccurate. Do not allow it to go below zero though:
3030 */
3031 if (unlikely((long)sum < 0))
3032 sum = 0;
3033
3034 return sum;
3035}
3036
3037unsigned long long nr_context_switches(void)
3038{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07003039 int i;
3040 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003042 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 sum += cpu_rq(i)->nr_switches;
3044
3045 return sum;
3046}
3047
3048unsigned long nr_iowait(void)
3049{
3050 unsigned long i, sum = 0;
3051
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003052 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 sum += atomic_read(&cpu_rq(i)->nr_iowait);
3054
3055 return sum;
3056}
3057
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003058unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07003059{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02003060 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07003061 return atomic_read(&this->nr_iowait);
3062}
3063
3064unsigned long this_cpu_load(void)
3065{
3066 struct rq *this = this_rq();
3067 return this->cpu_load[0];
3068}
3069
3070
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003071/* Variables and functions for calc_load */
3072static atomic_long_t calc_load_tasks;
3073static unsigned long calc_load_update;
3074unsigned long avenrun[3];
3075EXPORT_SYMBOL(avenrun);
3076
Peter Zijlstra74f51872010-04-22 21:50:19 +02003077static long calc_load_fold_active(struct rq *this_rq)
3078{
3079 long nr_active, delta = 0;
3080
3081 nr_active = this_rq->nr_running;
3082 nr_active += (long) this_rq->nr_uninterruptible;
3083
3084 if (nr_active != this_rq->calc_load_active) {
3085 delta = nr_active - this_rq->calc_load_active;
3086 this_rq->calc_load_active = nr_active;
3087 }
3088
3089 return delta;
3090}
3091
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003092static unsigned long
3093calc_load(unsigned long load, unsigned long exp, unsigned long active)
3094{
3095 load *= exp;
3096 load += active * (FIXED_1 - exp);
3097 load += 1UL << (FSHIFT - 1);
3098 return load >> FSHIFT;
3099}
3100
Peter Zijlstra74f51872010-04-22 21:50:19 +02003101#ifdef CONFIG_NO_HZ
3102/*
3103 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3104 *
3105 * When making the ILB scale, we should try to pull this in as well.
3106 */
3107static atomic_long_t calc_load_tasks_idle;
3108
3109static void calc_load_account_idle(struct rq *this_rq)
3110{
3111 long delta;
3112
3113 delta = calc_load_fold_active(this_rq);
3114 if (delta)
3115 atomic_long_add(delta, &calc_load_tasks_idle);
3116}
3117
3118static long calc_load_fold_idle(void)
3119{
3120 long delta = 0;
3121
3122 /*
3123 * Its got a race, we don't care...
3124 */
3125 if (atomic_long_read(&calc_load_tasks_idle))
3126 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3127
3128 return delta;
3129}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003130
3131/**
3132 * fixed_power_int - compute: x^n, in O(log n) time
3133 *
3134 * @x: base of the power
3135 * @frac_bits: fractional bits of @x
3136 * @n: power to raise @x to.
3137 *
3138 * By exploiting the relation between the definition of the natural power
3139 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3140 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3141 * (where: n_i \elem {0, 1}, the binary vector representing n),
3142 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3143 * of course trivially computable in O(log_2 n), the length of our binary
3144 * vector.
3145 */
3146static unsigned long
3147fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3148{
3149 unsigned long result = 1UL << frac_bits;
3150
3151 if (n) for (;;) {
3152 if (n & 1) {
3153 result *= x;
3154 result += 1UL << (frac_bits - 1);
3155 result >>= frac_bits;
3156 }
3157 n >>= 1;
3158 if (!n)
3159 break;
3160 x *= x;
3161 x += 1UL << (frac_bits - 1);
3162 x >>= frac_bits;
3163 }
3164
3165 return result;
3166}
3167
3168/*
3169 * a1 = a0 * e + a * (1 - e)
3170 *
3171 * a2 = a1 * e + a * (1 - e)
3172 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3173 * = a0 * e^2 + a * (1 - e) * (1 + e)
3174 *
3175 * a3 = a2 * e + a * (1 - e)
3176 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3177 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3178 *
3179 * ...
3180 *
3181 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3182 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3183 * = a0 * e^n + a * (1 - e^n)
3184 *
3185 * [1] application of the geometric series:
3186 *
3187 * n 1 - x^(n+1)
3188 * S_n := \Sum x^i = -------------
3189 * i=0 1 - x
3190 */
3191static unsigned long
3192calc_load_n(unsigned long load, unsigned long exp,
3193 unsigned long active, unsigned int n)
3194{
3195
3196 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3197}
3198
3199/*
3200 * NO_HZ can leave us missing all per-cpu ticks calling
3201 * calc_load_account_active(), but since an idle CPU folds its delta into
3202 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3203 * in the pending idle delta if our idle period crossed a load cycle boundary.
3204 *
3205 * Once we've updated the global active value, we need to apply the exponential
3206 * weights adjusted to the number of cycles missed.
3207 */
3208static void calc_global_nohz(unsigned long ticks)
3209{
3210 long delta, active, n;
3211
3212 if (time_before(jiffies, calc_load_update))
3213 return;
3214
3215 /*
3216 * If we crossed a calc_load_update boundary, make sure to fold
3217 * any pending idle changes, the respective CPUs might have
3218 * missed the tick driven calc_load_account_active() update
3219 * due to NO_HZ.
3220 */
3221 delta = calc_load_fold_idle();
3222 if (delta)
3223 atomic_long_add(delta, &calc_load_tasks);
3224
3225 /*
3226 * If we were idle for multiple load cycles, apply them.
3227 */
3228 if (ticks >= LOAD_FREQ) {
3229 n = ticks / LOAD_FREQ;
3230
3231 active = atomic_long_read(&calc_load_tasks);
3232 active = active > 0 ? active * FIXED_1 : 0;
3233
3234 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3235 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3236 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3237
3238 calc_load_update += n * LOAD_FREQ;
3239 }
3240
3241 /*
3242 * Its possible the remainder of the above division also crosses
3243 * a LOAD_FREQ period, the regular check in calc_global_load()
3244 * which comes after this will take care of that.
3245 *
3246 * Consider us being 11 ticks before a cycle completion, and us
3247 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3248 * age us 4 cycles, and the test in calc_global_load() will
3249 * pick up the final one.
3250 */
3251}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003252#else
3253static void calc_load_account_idle(struct rq *this_rq)
3254{
3255}
3256
3257static inline long calc_load_fold_idle(void)
3258{
3259 return 0;
3260}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003261
3262static void calc_global_nohz(unsigned long ticks)
3263{
3264}
Peter Zijlstra74f51872010-04-22 21:50:19 +02003265#endif
3266
Thomas Gleixner2d024942009-05-02 20:08:52 +02003267/**
3268 * get_avenrun - get the load average array
3269 * @loads: pointer to dest load array
3270 * @offset: offset to add
3271 * @shift: shift count to shift the result left
3272 *
3273 * These values are estimates at best, so no need for locking.
3274 */
3275void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3276{
3277 loads[0] = (avenrun[0] + offset) << shift;
3278 loads[1] = (avenrun[1] + offset) << shift;
3279 loads[2] = (avenrun[2] + offset) << shift;
3280}
3281
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003282/*
3283 * calc_load - update the avenrun load estimates 10 ticks after the
3284 * CPUs have updated calc_load_tasks.
3285 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003286void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003287{
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003288 long active;
3289
Peter Zijlstra0f004f52010-11-30 19:48:45 +01003290 calc_global_nohz(ticks);
3291
3292 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003293 return;
3294
3295 active = atomic_long_read(&calc_load_tasks);
3296 active = active > 0 ? active * FIXED_1 : 0;
3297
3298 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3299 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3300 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3301
3302 calc_load_update += LOAD_FREQ;
3303}
3304
3305/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02003306 * Called from update_cpu_load() to periodically update this CPU's
3307 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003308 */
3309static void calc_load_account_active(struct rq *this_rq)
3310{
Peter Zijlstra74f51872010-04-22 21:50:19 +02003311 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003312
Peter Zijlstra74f51872010-04-22 21:50:19 +02003313 if (time_before(jiffies, this_rq->calc_load_update))
3314 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003315
Peter Zijlstra74f51872010-04-22 21:50:19 +02003316 delta = calc_load_fold_active(this_rq);
3317 delta += calc_load_fold_idle();
3318 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003319 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02003320
3321 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08003322}
3323
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003325 * The exact cpuload at various idx values, calculated at every tick would be
3326 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3327 *
3328 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3329 * on nth tick when cpu may be busy, then we have:
3330 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3331 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3332 *
3333 * decay_load_missed() below does efficient calculation of
3334 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3335 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3336 *
3337 * The calculation is approximated on a 128 point scale.
3338 * degrade_zero_ticks is the number of ticks after which load at any
3339 * particular idx is approximated to be zero.
3340 * degrade_factor is a precomputed table, a row for each load idx.
3341 * Each column corresponds to degradation factor for a power of two ticks,
3342 * based on 128 point scale.
3343 * Example:
3344 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3345 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3346 *
3347 * With this power of 2 load factors, we can degrade the load n times
3348 * by looking at 1 bits in n and doing as many mult/shift instead of
3349 * n mult/shifts needed by the exact degradation.
3350 */
3351#define DEGRADE_SHIFT 7
3352static const unsigned char
3353 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3354static const unsigned char
3355 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3356 {0, 0, 0, 0, 0, 0, 0, 0},
3357 {64, 32, 8, 0, 0, 0, 0, 0},
3358 {96, 72, 40, 12, 1, 0, 0},
3359 {112, 98, 75, 43, 15, 1, 0},
3360 {120, 112, 98, 76, 45, 16, 2} };
3361
3362/*
3363 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
3364 * would be when CPU is idle and so we just decay the old load without
3365 * adding any new load.
3366 */
3367static unsigned long
3368decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3369{
3370 int j = 0;
3371
3372 if (!missed_updates)
3373 return load;
3374
3375 if (missed_updates >= degrade_zero_ticks[idx])
3376 return 0;
3377
3378 if (idx == 1)
3379 return load >> missed_updates;
3380
3381 while (missed_updates) {
3382 if (missed_updates % 2)
3383 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3384
3385 missed_updates >>= 1;
3386 j++;
3387 }
3388 return load;
3389}
3390
3391/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003392 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003393 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3394 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07003395 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003396static void update_cpu_load(struct rq *this_rq)
Ingo Molnar48f24c42006-07-03 00:25:40 -07003397{
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003398 unsigned long this_load = this_rq->load.weight;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003399 unsigned long curr_jiffies = jiffies;
3400 unsigned long pending_updates;
Ingo Molnardd41f592007-07-09 18:51:59 +02003401 int i, scale;
3402
3403 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02003404
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003405 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3406 if (curr_jiffies == this_rq->last_load_update_tick)
3407 return;
3408
3409 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3410 this_rq->last_load_update_tick = curr_jiffies;
3411
Ingo Molnardd41f592007-07-09 18:51:59 +02003412 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003413 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3414 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02003415 unsigned long old_load, new_load;
3416
3417 /* scale is effectively 1 << i now, and >> i divides by scale */
3418
3419 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003420 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02003421 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02003422 /*
3423 * Round up the averaging division if load is increasing. This
3424 * prevents us from getting stuck on 9 if the load is 10, for
3425 * example.
3426 */
3427 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003428 new_load += scale - 1;
3429
3430 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02003431 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07003432
3433 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003434}
3435
3436static void update_cpu_load_active(struct rq *this_rq)
3437{
3438 update_cpu_load(this_rq);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02003439
Peter Zijlstra74f51872010-04-22 21:50:19 +02003440 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07003441}
3442
Ingo Molnardd41f592007-07-09 18:51:59 +02003443#ifdef CONFIG_SMP
3444
Ingo Molnar48f24c42006-07-03 00:25:40 -07003445/*
Peter Zijlstra38022902009-12-16 18:04:37 +01003446 * sched_exec - execve() is a valuable balancing opportunity, because at
3447 * this point the task has the smallest effective memory and cache footprint.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 */
Peter Zijlstra38022902009-12-16 18:04:37 +01003449void sched_exec(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450{
Peter Zijlstra38022902009-12-16 18:04:37 +01003451 struct task_struct *p = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003453 struct rq *rq;
Peter Zijlstra0017d732010-03-24 18:34:10 +01003454 int dest_cpu;
Peter Zijlstra38022902009-12-16 18:04:37 +01003455
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456 rq = task_rq_lock(p, &flags);
Peter Zijlstra0017d732010-03-24 18:34:10 +01003457 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3458 if (dest_cpu == smp_processor_id())
3459 goto unlock;
Peter Zijlstra38022902009-12-16 18:04:37 +01003460
3461 /*
3462 * select_task_rq() can race against ->cpus_allowed
3463 */
Oleg Nesterov30da6882010-03-15 10:10:19 +01003464 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05303465 likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
Tejun Heo969c7922010-05-06 18:49:21 +02003466 struct migration_arg arg = { p, dest_cpu };
Ingo Molnar36c8b582006-07-03 00:25:41 -07003467
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02003469 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 return;
3471 }
Peter Zijlstra0017d732010-03-24 18:34:10 +01003472unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 task_rq_unlock(rq, &flags);
3474}
3475
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476#endif
3477
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478DEFINE_PER_CPU(struct kernel_stat, kstat);
3479
3480EXPORT_PER_CPU_SYMBOL(kstat);
3481
3482/*
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003483 * Return any ns on the sched_clock that have not yet been accounted in
Frank Mayharf06febc2008-09-12 09:54:39 -07003484 * @p in case that task is currently running.
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003485 *
3486 * Called with task_rq_lock() held on @rq.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487 */
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003488static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3489{
3490 u64 ns = 0;
3491
3492 if (task_current(rq, p)) {
3493 update_rq_clock(rq);
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07003494 ns = rq->clock_task - p->se.exec_start;
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003495 if ((s64)ns < 0)
3496 ns = 0;
3497 }
3498
3499 return ns;
3500}
3501
Frank Mayharbb34d922008-09-12 09:54:39 -07003502unsigned long long task_delta_exec(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 unsigned long flags;
Ingo Molnar41b86e92007-07-09 18:51:58 +02003505 struct rq *rq;
Frank Mayharbb34d922008-09-12 09:54:39 -07003506 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003507
Ingo Molnar41b86e92007-07-09 18:51:58 +02003508 rq = task_rq_lock(p, &flags);
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003509 ns = do_task_delta_exec(p, rq);
3510 task_rq_unlock(rq, &flags);
Ingo Molnar15084872008-09-30 08:28:17 +02003511
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003512 return ns;
3513}
Frank Mayharf06febc2008-09-12 09:54:39 -07003514
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003515/*
3516 * Return accounted runtime for the task.
3517 * In case the task is currently running, return the runtime plus current's
3518 * pending runtime that have not been accounted yet.
3519 */
3520unsigned long long task_sched_runtime(struct task_struct *p)
3521{
3522 unsigned long flags;
3523 struct rq *rq;
3524 u64 ns = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07003525
Hidetoshi Setoc5f8d992009-03-31 16:56:03 +09003526 rq = task_rq_lock(p, &flags);
3527 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3528 task_rq_unlock(rq, &flags);
3529
3530 return ns;
3531}
3532
3533/*
3534 * Return sum_exec_runtime for the thread group.
3535 * In case the task is currently running, return the sum plus current's
3536 * pending runtime that have not been accounted yet.
3537 *
3538 * Note that the thread group might have other running tasks as well,
3539 * so the return value not includes other pending runtime that other
3540 * running tasks might have.
3541 */
3542unsigned long long thread_group_sched_runtime(struct task_struct *p)
3543{
3544 struct task_cputime totals;
3545 unsigned long flags;
3546 struct rq *rq;
3547 u64 ns;
3548
3549 rq = task_rq_lock(p, &flags);
3550 thread_group_cputime(p, &totals);
3551 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 task_rq_unlock(rq, &flags);
3553
3554 return ns;
3555}
3556
3557/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558 * Account user cpu time to a process.
3559 * @p: the process that the cpu time gets accounted to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 * @cputime: the cpu time spent in user space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003561 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003563void account_user_time(struct task_struct *p, cputime_t cputime,
3564 cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565{
3566 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3567 cputime64_t tmp;
3568
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003569 /* Add user time to process. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003571 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003572 account_group_user_time(p, cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573
3574 /* Add user time to cpustat. */
3575 tmp = cputime_to_cputime64(cputime);
3576 if (TASK_NICE(p) > 0)
3577 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3578 else
3579 cpustat->user = cputime64_add(cpustat->user, tmp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05303580
3581 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
Jonathan Lim49b5cf32008-07-25 01:48:40 -07003582 /* Account for user time used */
3583 acct_update_integrals(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584}
3585
3586/*
Laurent Vivier94886b82007-10-15 17:00:19 +02003587 * Account guest cpu time to a process.
3588 * @p: the process that the cpu time gets accounted to
3589 * @cputime: the cpu time spent in virtual machine since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003590 * @cputime_scaled: cputime scaled by cpu frequency
Laurent Vivier94886b82007-10-15 17:00:19 +02003591 */
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003592static void account_guest_time(struct task_struct *p, cputime_t cputime,
3593 cputime_t cputime_scaled)
Laurent Vivier94886b82007-10-15 17:00:19 +02003594{
3595 cputime64_t tmp;
3596 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3597
3598 tmp = cputime_to_cputime64(cputime);
3599
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003600 /* Add guest time to process. */
Laurent Vivier94886b82007-10-15 17:00:19 +02003601 p->utime = cputime_add(p->utime, cputime);
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003602 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
Frank Mayharf06febc2008-09-12 09:54:39 -07003603 account_group_user_time(p, cputime);
Laurent Vivier94886b82007-10-15 17:00:19 +02003604 p->gtime = cputime_add(p->gtime, cputime);
3605
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003606 /* Add guest time to cpustat. */
Ryota Ozakice0e7b22009-10-24 01:20:10 +09003607 if (TASK_NICE(p) > 0) {
3608 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3609 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3610 } else {
3611 cpustat->user = cputime64_add(cpustat->user, tmp);
3612 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3613 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003614}
3615
3616/*
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003617 * Account system cpu time to a process and desired cpustat field
3618 * @p: the process that the cpu time gets accounted to
3619 * @cputime: the cpu time spent in kernel space since the last update
3620 * @cputime_scaled: cputime scaled by cpu frequency
3621 * @target_cputime64: pointer to cpustat field that has to be updated
3622 */
3623static inline
3624void __account_system_time(struct task_struct *p, cputime_t cputime,
3625 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3626{
3627 cputime64_t tmp = cputime_to_cputime64(cputime);
3628
3629 /* Add system time to process. */
3630 p->stime = cputime_add(p->stime, cputime);
3631 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3632 account_group_system_time(p, cputime);
3633
3634 /* Add system time to cpustat. */
3635 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3636 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3637
3638 /* Account for system time used */
3639 acct_update_integrals(p);
3640}
3641
3642/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 * Account system cpu time to a process.
3644 * @p: the process that the cpu time gets accounted to
3645 * @hardirq_offset: the offset to subtract from hardirq_count()
3646 * @cputime: the cpu time spent in kernel space since the last update
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003647 * @cputime_scaled: cputime scaled by cpu frequency
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 */
3649void account_system_time(struct task_struct *p, int hardirq_offset,
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003650 cputime_t cputime, cputime_t cputime_scaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651{
3652 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003653 cputime64_t *target_cputime64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003655 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
Martin Schwidefsky457533a2008-12-31 15:11:37 +01003656 account_guest_time(p, cputime, cputime_scaled);
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003657 return;
3658 }
Laurent Vivier94886b82007-10-15 17:00:19 +02003659
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 if (hardirq_count() - hardirq_offset)
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003661 target_cputime64 = &cpustat->irq;
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07003662 else if (in_serving_softirq())
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003663 target_cputime64 = &cpustat->softirq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 else
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003665 target_cputime64 = &cpustat->system;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003666
Venkatesh Pallipadi70a89a62010-12-21 17:09:02 -08003667 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668}
3669
3670/*
3671 * Account for involuntary wait time.
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003672 * @cputime: the cpu time spent in involuntary wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003674void account_steal_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003677 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3678
3679 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680}
3681
Christoph Lameter7835b982006-12-10 02:20:22 -08003682/*
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003683 * Account for idle time.
3684 * @cputime: the cpu time spent in idle wait
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003686void account_idle_time(cputime_t cputime)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687{
3688 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003689 cputime64_t cputime64 = cputime_to_cputime64(cputime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 struct rq *rq = this_rq();
3691
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003692 if (atomic_read(&rq->nr_iowait) > 0)
3693 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3694 else
3695 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
Christoph Lameter7835b982006-12-10 02:20:22 -08003696}
3697
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003698#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3699
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003700#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3701/*
3702 * Account a tick to a process and cpustat
3703 * @p: the process that the cpu time gets accounted to
3704 * @user_tick: is the tick from userspace
3705 * @rq: the pointer to rq
3706 *
3707 * Tick demultiplexing follows the order
3708 * - pending hardirq update
3709 * - pending softirq update
3710 * - user_time
3711 * - idle_time
3712 * - system time
3713 * - check for guest_time
3714 * - else account as system_time
3715 *
3716 * Check for hardirq is done both for system and user time as there is
3717 * no timer going off while we are on hardirq and hence we may never get an
3718 * opportunity to update it solely in system time.
3719 * p->stime and friends are only updated on system time and not on irq
3720 * softirq as those do not count in task exec_runtime any more.
3721 */
3722static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3723 struct rq *rq)
3724{
3725 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3726 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3727 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3728
3729 if (irqtime_account_hi_update()) {
3730 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3731 } else if (irqtime_account_si_update()) {
3732 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
Venkatesh Pallipadi414bee92010-12-21 17:09:04 -08003733 } else if (this_cpu_ksoftirqd() == p) {
3734 /*
3735 * ksoftirqd time do not get accounted in cpu_softirq_time.
3736 * So, we have to handle it separately here.
3737 * Also, p->stime needs to be updated for ksoftirqd.
3738 */
3739 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3740 &cpustat->softirq);
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003741 } else if (user_tick) {
3742 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3743 } else if (p == rq->idle) {
3744 account_idle_time(cputime_one_jiffy);
3745 } else if (p->flags & PF_VCPU) { /* System time or guest time */
3746 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3747 } else {
3748 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3749 &cpustat->system);
3750 }
3751}
3752
3753static void irqtime_account_idle_ticks(int ticks)
3754{
3755 int i;
3756 struct rq *rq = this_rq();
3757
3758 for (i = 0; i < ticks; i++)
3759 irqtime_account_process_tick(current, 0, rq);
3760}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003761#else /* CONFIG_IRQ_TIME_ACCOUNTING */
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003762static void irqtime_account_idle_ticks(int ticks) {}
3763static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3764 struct rq *rq) {}
Venkatesh Pallipadi544b4a12011-02-25 15:13:16 -08003765#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003766
3767/*
3768 * Account a single tick of cpu time.
3769 * @p: the process that the cpu time gets accounted to
3770 * @user_tick: indicates if the tick is a user or a system tick
3771 */
3772void account_process_tick(struct task_struct *p, int user_tick)
3773{
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003774 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003775 struct rq *rq = this_rq();
3776
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003777 if (sched_clock_irqtime) {
3778 irqtime_account_process_tick(p, user_tick, rq);
3779 return;
3780 }
3781
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003782 if (user_tick)
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003783 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
Eric Dumazetf5f293a2009-04-29 14:44:49 +02003784 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003785 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003786 one_jiffy_scaled);
3787 else
Stanislaw Gruszkaa42548a2009-07-29 12:15:29 +02003788 account_idle_time(cputime_one_jiffy);
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003789}
3790
3791/*
3792 * Account multiple ticks of steal time.
3793 * @p: the process from which the cpu time has been stolen
3794 * @ticks: number of stolen ticks
3795 */
3796void account_steal_ticks(unsigned long ticks)
3797{
3798 account_steal_time(jiffies_to_cputime(ticks));
3799}
3800
3801/*
3802 * Account multiple ticks of idle time.
3803 * @ticks: number of stolen ticks
3804 */
3805void account_idle_ticks(unsigned long ticks)
3806{
Venkatesh Pallipadiabb74ce2010-12-21 17:09:03 -08003807
3808 if (sched_clock_irqtime) {
3809 irqtime_account_idle_ticks(ticks);
3810 return;
3811 }
3812
Martin Schwidefsky79741dd2008-12-31 15:11:38 +01003813 account_idle_time(jiffies_to_cputime(ticks));
3814}
3815
3816#endif
3817
Christoph Lameter7835b982006-12-10 02:20:22 -08003818/*
Balbir Singh49048622008-09-05 18:12:23 +02003819 * Use precise platform statistics if available:
3820 */
3821#ifdef CONFIG_VIRT_CPU_ACCOUNTING
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003822void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003823{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003824 *ut = p->utime;
3825 *st = p->stime;
Balbir Singh49048622008-09-05 18:12:23 +02003826}
3827
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003828void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003829{
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003830 struct task_cputime cputime;
3831
3832 thread_group_cputime(p, &cputime);
3833
3834 *ut = cputime.utime;
3835 *st = cputime.stime;
Balbir Singh49048622008-09-05 18:12:23 +02003836}
3837#else
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003838
3839#ifndef nsecs_to_cputime
Hidetoshi Setob7b20df92009-11-26 14:49:27 +09003840# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003841#endif
3842
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003843void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
Balbir Singh49048622008-09-05 18:12:23 +02003844{
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003845 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
Balbir Singh49048622008-09-05 18:12:23 +02003846
3847 /*
3848 * Use CFS's precise accounting:
3849 */
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003850 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
Balbir Singh49048622008-09-05 18:12:23 +02003851
3852 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003853 u64 temp = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003854
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003855 temp *= utime;
Balbir Singh49048622008-09-05 18:12:23 +02003856 do_div(temp, total);
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003857 utime = (cputime_t)temp;
3858 } else
3859 utime = rtime;
Balbir Singh49048622008-09-05 18:12:23 +02003860
3861 /*
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003862 * Compare with previous values, to keep monotonicity:
Balbir Singh49048622008-09-05 18:12:23 +02003863 */
Hidetoshi Seto761b1d22009-11-12 13:33:45 +09003864 p->prev_utime = max(p->prev_utime, utime);
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003865 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
Balbir Singh49048622008-09-05 18:12:23 +02003866
Hidetoshi Setod99ca3b2009-12-02 17:26:47 +09003867 *ut = p->prev_utime;
3868 *st = p->prev_stime;
Hidetoshi Setod180c5b2009-11-26 14:48:30 +09003869}
Balbir Singh49048622008-09-05 18:12:23 +02003870
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003871/*
3872 * Must be called with siglock held.
3873 */
3874void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3875{
3876 struct signal_struct *sig = p->signal;
3877 struct task_cputime cputime;
3878 cputime_t rtime, utime, total;
3879
3880 thread_group_cputime(p, &cputime);
3881
3882 total = cputime_add(cputime.utime, cputime.stime);
3883 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3884
3885 if (total) {
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003886 u64 temp = rtime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003887
Stanislaw Gruszkae75e8632010-09-14 16:35:14 +02003888 temp *= cputime.utime;
Hidetoshi Seto0cf55e12009-12-02 17:28:07 +09003889 do_div(temp, total);
3890 utime = (cputime_t)temp;
3891 } else
3892 utime = rtime;
3893
3894 sig->prev_utime = max(sig->prev_utime, utime);
3895 sig->prev_stime = max(sig->prev_stime,
3896 cputime_sub(rtime, sig->prev_utime));
3897
3898 *ut = sig->prev_utime;
3899 *st = sig->prev_stime;
Balbir Singh49048622008-09-05 18:12:23 +02003900}
3901#endif
3902
Balbir Singh49048622008-09-05 18:12:23 +02003903/*
Christoph Lameter7835b982006-12-10 02:20:22 -08003904 * This function gets called by the timer code, with HZ frequency.
3905 * We call it with interrupts disabled.
3906 *
3907 * It also gets called by the fork code, when changing the parent's
3908 * timeslices.
3909 */
3910void scheduler_tick(void)
3911{
Christoph Lameter7835b982006-12-10 02:20:22 -08003912 int cpu = smp_processor_id();
3913 struct rq *rq = cpu_rq(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02003914 struct task_struct *curr = rq->curr;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003915
3916 sched_clock_tick();
Christoph Lameter7835b982006-12-10 02:20:22 -08003917
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003918 raw_spin_lock(&rq->lock);
Peter Zijlstra3e51f332008-05-03 18:29:28 +02003919 update_rq_clock(rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07003920 update_cpu_load_active(rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01003921 curr->sched_class->task_tick(rq, curr, 0);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01003922 raw_spin_unlock(&rq->lock);
Ingo Molnardd41f592007-07-09 18:51:59 +02003923
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003924 perf_event_task_tick();
Peter Zijlstrae220d2d2009-05-23 18:28:55 +02003925
Christoph Lametere418e1c2006-12-10 02:20:23 -08003926#ifdef CONFIG_SMP
Ingo Molnardd41f592007-07-09 18:51:59 +02003927 rq->idle_at_tick = idle_cpu(cpu);
3928 trigger_load_balance(rq, cpu);
Christoph Lametere418e1c2006-12-10 02:20:23 -08003929#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930}
3931
Lai Jiangshan132380a2009-04-02 14:18:25 +08003932notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003933{
3934 if (in_lock_functions(addr)) {
3935 addr = CALLER_ADDR2;
3936 if (in_lock_functions(addr))
3937 addr = CALLER_ADDR3;
3938 }
3939 return addr;
3940}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05003942#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3943 defined(CONFIG_PREEMPT_TRACER))
3944
Srinivasa Ds43627582008-02-23 15:24:04 -08003945void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003947#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 /*
3949 * Underflow?
3950 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003951 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3952 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003953#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003955#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 /*
3957 * Spinlock count overflowing soon?
3958 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08003959 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3960 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003961#endif
3962 if (preempt_count() == val)
3963 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964}
3965EXPORT_SYMBOL(add_preempt_count);
3966
Srinivasa Ds43627582008-02-23 15:24:04 -08003967void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003969#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 /*
3971 * Underflow?
3972 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01003973 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003974 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 /*
3976 * Is the spinlock portion underflowing?
3977 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003978 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3979 !(preempt_count() & PREEMPT_MASK)))
3980 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003981#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07003982
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02003983 if (preempt_count() == val)
3984 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985 preempt_count() -= val;
3986}
3987EXPORT_SYMBOL(sub_preempt_count);
3988
3989#endif
3990
3991/*
Ingo Molnardd41f592007-07-09 18:51:59 +02003992 * Print scheduling while atomic bug:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 */
Ingo Molnardd41f592007-07-09 18:51:59 +02003994static noinline void __schedule_bug(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995{
Satyam Sharma838225b2007-10-24 18:23:50 +02003996 struct pt_regs *regs = get_irq_regs();
3997
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01003998 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3999 prev->comm, prev->pid, preempt_count());
Satyam Sharma838225b2007-10-24 18:23:50 +02004000
Ingo Molnardd41f592007-07-09 18:51:59 +02004001 debug_show_held_locks(prev);
Arjan van de Vene21f5b12008-05-23 09:05:58 -07004002 print_modules();
Ingo Molnardd41f592007-07-09 18:51:59 +02004003 if (irqs_disabled())
4004 print_irqtrace_events(prev);
Satyam Sharma838225b2007-10-24 18:23:50 +02004005
4006 if (regs)
4007 show_regs(regs);
4008 else
4009 dump_stack();
Ingo Molnardd41f592007-07-09 18:51:59 +02004010}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011
Ingo Molnardd41f592007-07-09 18:51:59 +02004012/*
4013 * Various schedule()-time debugging checks and statistics:
4014 */
4015static inline void schedule_debug(struct task_struct *prev)
4016{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017 /*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004018 * Test if we are atomic. Since do_exit() needs to call into
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019 * schedule() atomically, we ignore that path for now.
4020 * Otherwise, whine if we are scheduling when we should not be.
4021 */
Roel Kluin3f33a7c2008-05-13 23:44:11 +02004022 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
Ingo Molnardd41f592007-07-09 18:51:59 +02004023 __schedule_bug(prev);
4024
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4026
Ingo Molnar2d723762007-10-15 17:00:12 +02004027 schedstat_inc(this_rq(), sched_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004028#ifdef CONFIG_SCHEDSTATS
4029 if (unlikely(prev->lock_depth >= 0)) {
Yong Zhangfce20972011-01-14 15:57:39 +08004030 schedstat_inc(this_rq(), rq_sched_info.bkl_count);
Ingo Molnar2d723762007-10-15 17:00:12 +02004031 schedstat_inc(prev, sched_info.bkl_count);
Ingo Molnarb8efb562007-10-15 17:00:10 +02004032 }
4033#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02004034}
4035
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004036static void put_prev_task(struct rq *rq, struct task_struct *prev)
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004037{
Mike Galbraitha64692a2010-03-11 17:16:20 +01004038 if (prev->se.on_rq)
4039 update_rq_clock(rq);
Peter Zijlstra6cecd082009-11-30 13:00:37 +01004040 prev->sched_class->put_prev_task(rq, prev);
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004041}
4042
Ingo Molnardd41f592007-07-09 18:51:59 +02004043/*
4044 * Pick up the highest-prio task:
4045 */
4046static inline struct task_struct *
Wang Chenb67802e2009-03-02 13:55:26 +08004047pick_next_task(struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02004048{
Ingo Molnar5522d5d2007-10-15 17:00:12 +02004049 const struct sched_class *class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004050 struct task_struct *p;
4051
4052 /*
4053 * Optimization: we know that if all tasks are in
4054 * the fair class we can call that function directly:
4055 */
4056 if (likely(rq->nr_running == rq->cfs.nr_running)) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004057 p = fair_sched_class.pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004058 if (likely(p))
4059 return p;
4060 }
4061
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004062 for_each_class(class) {
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004063 p = class->pick_next_task(rq);
Ingo Molnardd41f592007-07-09 18:51:59 +02004064 if (p)
4065 return p;
Ingo Molnardd41f592007-07-09 18:51:59 +02004066 }
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004067
4068 BUG(); /* the idle class will always have a runnable task */
Ingo Molnardd41f592007-07-09 18:51:59 +02004069}
4070
4071/*
4072 * schedule() is the main scheduler function.
4073 */
Peter Zijlstraff743342009-03-13 12:21:26 +01004074asmlinkage void __sched schedule(void)
Ingo Molnardd41f592007-07-09 18:51:59 +02004075{
4076 struct task_struct *prev, *next;
Harvey Harrison67ca7bd2008-02-15 09:56:36 -08004077 unsigned long *switch_count;
Ingo Molnardd41f592007-07-09 18:51:59 +02004078 struct rq *rq;
Peter Zijlstra31656512008-07-18 18:01:23 +02004079 int cpu;
Ingo Molnardd41f592007-07-09 18:51:59 +02004080
Peter Zijlstraff743342009-03-13 12:21:26 +01004081need_resched:
4082 preempt_disable();
Ingo Molnardd41f592007-07-09 18:51:59 +02004083 cpu = smp_processor_id();
4084 rq = cpu_rq(cpu);
Paul E. McKenney25502a62010-04-01 17:37:01 -07004085 rcu_note_context_switch(cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02004086 prev = rq->curr;
Ingo Molnardd41f592007-07-09 18:51:59 +02004087
Ingo Molnardd41f592007-07-09 18:51:59 +02004088 schedule_debug(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089
Peter Zijlstra31656512008-07-18 18:01:23 +02004090 if (sched_feat(HRTICK))
Mike Galbraithf333fdc2008-05-12 21:20:55 +02004091 hrtick_clear(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004092
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004093 raw_spin_lock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004095 switch_count = &prev->nivcsw;
Ingo Molnardd41f592007-07-09 18:51:59 +02004096 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
Tejun Heo21aa9af2010-06-08 21:40:37 +02004097 if (unlikely(signal_pending_state(prev->state, prev))) {
Ingo Molnardd41f592007-07-09 18:51:59 +02004098 prev->state = TASK_RUNNING;
Tejun Heo21aa9af2010-06-08 21:40:37 +02004099 } else {
4100 /*
4101 * If a worker is going to sleep, notify and
4102 * ask workqueue whether it wants to wake up a
4103 * task to maintain concurrency. If so, wake
4104 * up the task.
4105 */
4106 if (prev->flags & PF_WQ_WORKER) {
4107 struct task_struct *to_wakeup;
4108
4109 to_wakeup = wq_worker_sleeping(prev, cpu);
4110 if (to_wakeup)
4111 try_to_wake_up_local(to_wakeup);
4112 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004113 deactivate_task(rq, prev, DEQUEUE_SLEEP);
Tejun Heo21aa9af2010-06-08 21:40:37 +02004114 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004115 switch_count = &prev->nvcsw;
4116 }
4117
Gregory Haskins3f029d32009-07-29 11:08:47 -04004118 pre_schedule(rq, prev);
Steven Rostedtf65eda42008-01-25 21:08:07 +01004119
Ingo Molnardd41f592007-07-09 18:51:59 +02004120 if (unlikely(!rq->nr_running))
4121 idle_balance(cpu, rq);
4122
Mike Galbraithdf1c99d2009-03-10 19:08:11 +01004123 put_prev_task(rq, prev);
Wang Chenb67802e2009-03-02 13:55:26 +08004124 next = pick_next_task(rq);
Mike Galbraithf26f9af2010-12-08 11:05:42 +01004125 clear_tsk_need_resched(prev);
4126 rq->skip_clock_update = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 if (likely(prev != next)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 rq->nr_switches++;
4130 rq->curr = next;
4131 ++*switch_count;
4132
Ingo Molnardd41f592007-07-09 18:51:59 +02004133 context_switch(rq, prev, next); /* unlocks the rq */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004134 /*
Oleg Nesterov246d86b2010-05-19 14:57:11 +02004135 * The context switch have flipped the stack from under us
4136 * and restored the local variables which were saved when
4137 * this task called schedule() in the past. prev == current
4138 * is still correct, but it can be moved to another cpu/rq.
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004139 */
4140 cpu = smp_processor_id();
4141 rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142 } else
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004143 raw_spin_unlock_irq(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
Gregory Haskins3f029d32009-07-29 11:08:47 -04004145 post_schedule(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147 preempt_enable_no_resched();
Peter Zijlstraff743342009-03-13 12:21:26 +01004148 if (need_resched())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149 goto need_resched;
4150}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151EXPORT_SYMBOL(schedule);
4152
Frederic Weisbeckerc08f7822009-12-02 20:49:17 +01004153#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004154/*
4155 * Look out! "owner" is an entirely speculative pointer
4156 * access and not reliable.
4157 */
4158int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
4159{
4160 unsigned int cpu;
4161 struct rq *rq;
4162
4163 if (!sched_feat(OWNER_SPIN))
4164 return 0;
4165
4166#ifdef CONFIG_DEBUG_PAGEALLOC
4167 /*
4168 * Need to access the cpu field knowing that
4169 * DEBUG_PAGEALLOC could have unmapped it if
4170 * the mutex owner just released it and exited.
4171 */
4172 if (probe_kernel_address(&owner->cpu, cpu))
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004173 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004174#else
4175 cpu = owner->cpu;
4176#endif
4177
4178 /*
4179 * Even if the access succeeded (likely case),
4180 * the cpu field may no longer be valid.
4181 */
4182 if (cpu >= nr_cpumask_bits)
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004183 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004184
4185 /*
4186 * We need to validate that we can do a
4187 * get_cpu() and that we have the percpu area.
4188 */
4189 if (!cpu_online(cpu))
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004190 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004191
4192 rq = cpu_rq(cpu);
4193
4194 for (;;) {
4195 /*
4196 * Owner changed, break to re-assess state.
4197 */
Tim Chen9d0f4dc2010-08-18 15:00:27 -07004198 if (lock->owner != owner) {
4199 /*
4200 * If the lock has switched to a different owner,
4201 * we likely have heavy contention. Return 0 to quit
4202 * optimistic spinning and not contend further:
4203 */
4204 if (lock->owner)
4205 return 0;
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004206 break;
Tim Chen9d0f4dc2010-08-18 15:00:27 -07004207 }
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004208
4209 /*
4210 * Is that owner really running on that cpu?
4211 */
4212 if (task_thread_info(rq->curr) != owner || need_resched())
4213 return 0;
4214
Gerald Schaefer335d7af2010-11-22 15:47:36 +01004215 arch_mutex_cpu_relax();
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004216 }
Benjamin Herrenschmidt4b402212010-04-16 23:20:00 +02004217
Peter Zijlstra0d66bf62009-01-12 14:01:47 +01004218 return 1;
4219}
4220#endif
4221
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222#ifdef CONFIG_PREEMPT
4223/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004224 * this is the entry point to schedule() from in-kernel preemption
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004225 * off of preempt_enable. Kernel preemptions off return from interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 * occur there and call schedule directly.
4227 */
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004228asmlinkage void __sched notrace preempt_schedule(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229{
4230 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004231
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 /*
4233 * If there is a non-zero preempt_count or interrupts are disabled,
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004234 * we do not want to preempt the current task. Just return..
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 */
Nick Pigginbeed33a2006-10-11 01:21:52 -07004236 if (likely(ti->preempt_count || irqs_disabled()))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 return;
4238
Andi Kleen3a5c3592007-10-15 17:00:14 +02004239 do {
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004240 add_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004241 schedule();
Steven Rostedtd1f74e22010-06-02 21:52:29 -04004242 sub_preempt_count_notrace(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004243
4244 /*
4245 * Check again in case we missed a preemption opportunity
4246 * between schedule and now.
4247 */
4248 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004249 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251EXPORT_SYMBOL(preempt_schedule);
4252
4253/*
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004254 * this is the entry point to schedule() from kernel preemption
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255 * off of irq context.
4256 * Note, that this is called and return with irqs disabled. This will
4257 * protect us against recursive calling from irq.
4258 */
4259asmlinkage void __sched preempt_schedule_irq(void)
4260{
4261 struct thread_info *ti = current_thread_info();
Ingo Molnar6478d882008-01-25 21:08:33 +01004262
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004263 /* Catch callers which need to be fixed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264 BUG_ON(ti->preempt_count || !irqs_disabled());
4265
Andi Kleen3a5c3592007-10-15 17:00:14 +02004266 do {
4267 add_preempt_count(PREEMPT_ACTIVE);
Andi Kleen3a5c3592007-10-15 17:00:14 +02004268 local_irq_enable();
4269 schedule();
4270 local_irq_disable();
Andi Kleen3a5c3592007-10-15 17:00:14 +02004271 sub_preempt_count(PREEMPT_ACTIVE);
4272
4273 /*
4274 * Check again in case we missed a preemption opportunity
4275 * between schedule and now.
4276 */
4277 barrier();
Lai Jiangshan5ed0cec2009-03-06 19:40:20 +08004278 } while (need_resched());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279}
4280
4281#endif /* CONFIG_PREEMPT */
4282
Peter Zijlstra63859d42009-09-15 19:14:42 +02004283int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004284 void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285{
Peter Zijlstra63859d42009-09-15 19:14:42 +02004286 return try_to_wake_up(curr->private, mode, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288EXPORT_SYMBOL(default_wake_function);
4289
4290/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004291 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4292 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293 * number) then we wake all the non-exclusive tasks and one exclusive task.
4294 *
4295 * There are circumstances in which we can try to wake a task which has already
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004296 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4298 */
Johannes Weiner78ddb082009-04-14 16:53:05 +02004299static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
Peter Zijlstra63859d42009-09-15 19:14:42 +02004300 int nr_exclusive, int wake_flags, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301{
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004302 wait_queue_t *curr, *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303
Matthias Kaehlcke2e458742007-10-15 17:00:02 +02004304 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
Ingo Molnar48f24c42006-07-03 00:25:40 -07004305 unsigned flags = curr->flags;
4306
Peter Zijlstra63859d42009-09-15 19:14:42 +02004307 if (curr->func(curr, mode, wake_flags, key) &&
Ingo Molnar48f24c42006-07-03 00:25:40 -07004308 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309 break;
4310 }
4311}
4312
4313/**
4314 * __wake_up - wake up threads blocked on a waitqueue.
4315 * @q: the waitqueue
4316 * @mode: which threads
4317 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Martin Waitz67be2dd2005-05-01 08:59:26 -07004318 * @key: is directly passed to the wakeup function
David Howells50fa6102009-04-28 15:01:38 +01004319 *
4320 * It may be assumed that this function implies a write memory barrier before
4321 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004323void __wake_up(wait_queue_head_t *q, unsigned int mode,
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004324 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325{
4326 unsigned long flags;
4327
4328 spin_lock_irqsave(&q->lock, flags);
4329 __wake_up_common(q, mode, nr_exclusive, 0, key);
4330 spin_unlock_irqrestore(&q->lock, flags);
4331}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332EXPORT_SYMBOL(__wake_up);
4333
4334/*
4335 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4336 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08004337void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338{
4339 __wake_up_common(q, mode, 1, 0, NULL);
4340}
Michal Nazarewicz22c43c82010-05-05 12:53:11 +02004341EXPORT_SYMBOL_GPL(__wake_up_locked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342
Davide Libenzi4ede8162009-03-31 15:24:20 -07004343void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4344{
4345 __wake_up_common(q, mode, 1, 0, key);
4346}
Trond Myklebustbf294b42011-02-21 11:05:41 -08004347EXPORT_SYMBOL_GPL(__wake_up_locked_key);
Davide Libenzi4ede8162009-03-31 15:24:20 -07004348
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349/**
Davide Libenzi4ede8162009-03-31 15:24:20 -07004350 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351 * @q: the waitqueue
4352 * @mode: which threads
4353 * @nr_exclusive: how many wake-one or wake-many threads to wake up
Davide Libenzi4ede8162009-03-31 15:24:20 -07004354 * @key: opaque value to be passed to wakeup targets
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 *
4356 * The sync wakeup differs that the waker knows that it will schedule
4357 * away soon, so while the target thread will be woken up, it will not
4358 * be migrated to another CPU - ie. the two threads are 'synchronized'
4359 * with each other. This can prevent needless bouncing between CPUs.
4360 *
4361 * On UP it can prevent extra preemption.
David Howells50fa6102009-04-28 15:01:38 +01004362 *
4363 * It may be assumed that this function implies a write memory barrier before
4364 * changing the task state if and only if any tasks are woken up.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365 */
Davide Libenzi4ede8162009-03-31 15:24:20 -07004366void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4367 int nr_exclusive, void *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368{
4369 unsigned long flags;
Peter Zijlstra7d478722009-09-14 19:55:44 +02004370 int wake_flags = WF_SYNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371
4372 if (unlikely(!q))
4373 return;
4374
4375 if (unlikely(!nr_exclusive))
Peter Zijlstra7d478722009-09-14 19:55:44 +02004376 wake_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
4378 spin_lock_irqsave(&q->lock, flags);
Peter Zijlstra7d478722009-09-14 19:55:44 +02004379 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 spin_unlock_irqrestore(&q->lock, flags);
4381}
Davide Libenzi4ede8162009-03-31 15:24:20 -07004382EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4383
4384/*
4385 * __wake_up_sync - see __wake_up_sync_key()
4386 */
4387void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4388{
4389 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4390}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4392
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004393/**
4394 * complete: - signals a single thread waiting on this completion
4395 * @x: holds the state of this particular completion
4396 *
4397 * This will wake up a single thread waiting on this completion. Threads will be
4398 * awakened in the same order in which they were queued.
4399 *
4400 * See also complete_all(), wait_for_completion() and related routines.
David Howells50fa6102009-04-28 15:01:38 +01004401 *
4402 * It may be assumed that this function implies a write memory barrier before
4403 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004404 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004405void complete(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406{
4407 unsigned long flags;
4408
4409 spin_lock_irqsave(&x->wait.lock, flags);
4410 x->done++;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004411 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412 spin_unlock_irqrestore(&x->wait.lock, flags);
4413}
4414EXPORT_SYMBOL(complete);
4415
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004416/**
4417 * complete_all: - signals all threads waiting on this completion
4418 * @x: holds the state of this particular completion
4419 *
4420 * This will wake up all threads waiting on this particular completion event.
David Howells50fa6102009-04-28 15:01:38 +01004421 *
4422 * It may be assumed that this function implies a write memory barrier before
4423 * changing the task state if and only if any tasks are woken up.
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004424 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004425void complete_all(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426{
4427 unsigned long flags;
4428
4429 spin_lock_irqsave(&x->wait.lock, flags);
4430 x->done += UINT_MAX/2;
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05004431 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432 spin_unlock_irqrestore(&x->wait.lock, flags);
4433}
4434EXPORT_SYMBOL(complete_all);
4435
Andi Kleen8cbbe862007-10-15 17:00:14 +02004436static inline long __sched
4437do_wait_for_common(struct completion *x, long timeout, int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439 if (!x->done) {
4440 DECLARE_WAITQUEUE(wait, current);
4441
Changli Gaoa93d2f12010-05-07 14:33:26 +08004442 __add_wait_queue_tail_exclusive(&x->wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443 do {
Oleg Nesterov94d3d822008-08-20 16:54:41 -07004444 if (signal_pending_state(state, current)) {
Oleg Nesterovea71a542008-06-20 18:32:20 +04004445 timeout = -ERESTARTSYS;
4446 break;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004447 }
4448 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004450 timeout = schedule_timeout(timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 spin_lock_irq(&x->wait.lock);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004452 } while (!x->done && timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 __remove_wait_queue(&x->wait, &wait);
Oleg Nesterovea71a542008-06-20 18:32:20 +04004454 if (!x->done)
4455 return timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 }
4457 x->done--;
Oleg Nesterovea71a542008-06-20 18:32:20 +04004458 return timeout ?: 1;
Andi Kleen8cbbe862007-10-15 17:00:14 +02004459}
4460
4461static long __sched
4462wait_for_common(struct completion *x, long timeout, int state)
4463{
4464 might_sleep();
4465
4466 spin_lock_irq(&x->wait.lock);
4467 timeout = do_wait_for_common(x, timeout, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 spin_unlock_irq(&x->wait.lock);
Andi Kleen8cbbe862007-10-15 17:00:14 +02004469 return timeout;
4470}
4471
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004472/**
4473 * wait_for_completion: - waits for completion of a task
4474 * @x: holds the state of this particular completion
4475 *
4476 * This waits to be signaled for completion of a specific task. It is NOT
4477 * interruptible and there is no timeout.
4478 *
4479 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4480 * and interrupt capability. Also see complete().
4481 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004482void __sched wait_for_completion(struct completion *x)
Andi Kleen8cbbe862007-10-15 17:00:14 +02004483{
4484 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485}
4486EXPORT_SYMBOL(wait_for_completion);
4487
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004488/**
4489 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4490 * @x: holds the state of this particular completion
4491 * @timeout: timeout value in jiffies
4492 *
4493 * This waits for either a completion of a specific task to be signaled or for a
4494 * specified timeout to expire. The timeout is in jiffies. It is not
4495 * interruptible.
4496 */
Ingo Molnarb15136e2007-10-24 18:23:48 +02004497unsigned long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4499{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004500 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501}
4502EXPORT_SYMBOL(wait_for_completion_timeout);
4503
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004504/**
4505 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4506 * @x: holds the state of this particular completion
4507 *
4508 * This waits for completion of a specific task to be signaled. It is
4509 * interruptible.
4510 */
Andi Kleen8cbbe862007-10-15 17:00:14 +02004511int __sched wait_for_completion_interruptible(struct completion *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512{
Andi Kleen51e97992007-10-18 21:32:55 +02004513 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4514 if (t == -ERESTARTSYS)
4515 return t;
4516 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517}
4518EXPORT_SYMBOL(wait_for_completion_interruptible);
4519
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004520/**
4521 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4522 * @x: holds the state of this particular completion
4523 * @timeout: timeout value in jiffies
4524 *
4525 * This waits for either a completion of a specific task to be signaled or for a
4526 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4527 */
NeilBrown6bf41232011-01-05 12:50:16 +11004528long __sched
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529wait_for_completion_interruptible_timeout(struct completion *x,
4530 unsigned long timeout)
4531{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004532 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533}
4534EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4535
Kevin Diggs65eb3dc2008-08-26 10:26:54 +02004536/**
4537 * wait_for_completion_killable: - waits for completion of a task (killable)
4538 * @x: holds the state of this particular completion
4539 *
4540 * This waits to be signaled for completion of a specific task. It can be
4541 * interrupted by a kill signal.
4542 */
Matthew Wilcox009e5772007-12-06 12:29:54 -05004543int __sched wait_for_completion_killable(struct completion *x)
4544{
4545 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4546 if (t == -ERESTARTSYS)
4547 return t;
4548 return 0;
4549}
4550EXPORT_SYMBOL(wait_for_completion_killable);
4551
Dave Chinnerbe4de352008-08-15 00:40:44 -07004552/**
Sage Weil0aa12fb2010-05-29 09:12:30 -07004553 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4554 * @x: holds the state of this particular completion
4555 * @timeout: timeout value in jiffies
4556 *
4557 * This waits for either a completion of a specific task to be
4558 * signaled or for a specified timeout to expire. It can be
4559 * interrupted by a kill signal. The timeout is in jiffies.
4560 */
NeilBrown6bf41232011-01-05 12:50:16 +11004561long __sched
Sage Weil0aa12fb2010-05-29 09:12:30 -07004562wait_for_completion_killable_timeout(struct completion *x,
4563 unsigned long timeout)
4564{
4565 return wait_for_common(x, timeout, TASK_KILLABLE);
4566}
4567EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4568
4569/**
Dave Chinnerbe4de352008-08-15 00:40:44 -07004570 * try_wait_for_completion - try to decrement a completion without blocking
4571 * @x: completion structure
4572 *
4573 * Returns: 0 if a decrement cannot be done without blocking
4574 * 1 if a decrement succeeded.
4575 *
4576 * If a completion is being used as a counting completion,
4577 * attempt to decrement the counter without blocking. This
4578 * enables us to avoid waiting if the resource the completion
4579 * is protecting is not available.
4580 */
4581bool try_wait_for_completion(struct completion *x)
4582{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004583 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004584 int ret = 1;
4585
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004586 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004587 if (!x->done)
4588 ret = 0;
4589 else
4590 x->done--;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004591 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004592 return ret;
4593}
4594EXPORT_SYMBOL(try_wait_for_completion);
4595
4596/**
4597 * completion_done - Test to see if a completion has any waiters
4598 * @x: completion structure
4599 *
4600 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4601 * 1 if there are no waiters.
4602 *
4603 */
4604bool completion_done(struct completion *x)
4605{
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004606 unsigned long flags;
Dave Chinnerbe4de352008-08-15 00:40:44 -07004607 int ret = 1;
4608
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004609 spin_lock_irqsave(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004610 if (!x->done)
4611 ret = 0;
Rafael J. Wysocki7539a3b2009-12-13 00:07:30 +01004612 spin_unlock_irqrestore(&x->wait.lock, flags);
Dave Chinnerbe4de352008-08-15 00:40:44 -07004613 return ret;
4614}
4615EXPORT_SYMBOL(completion_done);
4616
Andi Kleen8cbbe862007-10-15 17:00:14 +02004617static long __sched
4618sleep_on_common(wait_queue_head_t *q, int state, long timeout)
Ingo Molnar0fec1712007-07-09 18:52:01 +02004619{
4620 unsigned long flags;
4621 wait_queue_t wait;
4622
4623 init_waitqueue_entry(&wait, current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624
Andi Kleen8cbbe862007-10-15 17:00:14 +02004625 __set_current_state(state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626
Andi Kleen8cbbe862007-10-15 17:00:14 +02004627 spin_lock_irqsave(&q->lock, flags);
4628 __add_wait_queue(q, &wait);
4629 spin_unlock(&q->lock);
4630 timeout = schedule_timeout(timeout);
4631 spin_lock_irq(&q->lock);
4632 __remove_wait_queue(q, &wait);
4633 spin_unlock_irqrestore(&q->lock, flags);
4634
4635 return timeout;
4636}
4637
4638void __sched interruptible_sleep_on(wait_queue_head_t *q)
4639{
4640 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004642EXPORT_SYMBOL(interruptible_sleep_on);
4643
Ingo Molnar0fec1712007-07-09 18:52:01 +02004644long __sched
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004645interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004647 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4650
Ingo Molnar0fec1712007-07-09 18:52:01 +02004651void __sched sleep_on(wait_queue_head_t *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004653 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655EXPORT_SYMBOL(sleep_on);
4656
Ingo Molnar0fec1712007-07-09 18:52:01 +02004657long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658{
Andi Kleen8cbbe862007-10-15 17:00:14 +02004659 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661EXPORT_SYMBOL(sleep_on_timeout);
4662
Ingo Molnarb29739f2006-06-27 02:54:51 -07004663#ifdef CONFIG_RT_MUTEXES
4664
4665/*
4666 * rt_mutex_setprio - set the current priority of a task
4667 * @p: task
4668 * @prio: prio value (kernel-internal form)
4669 *
4670 * This function changes the 'effective' priority of a task. It does
4671 * not touch ->normal_prio like __setscheduler().
4672 *
4673 * Used by the rt_mutex code to implement priority inheritance logic.
4674 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004675void rt_mutex_setprio(struct task_struct *p, int prio)
Ingo Molnarb29739f2006-06-27 02:54:51 -07004676{
4677 unsigned long flags;
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004678 int oldprio, on_rq, running;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004679 struct rq *rq;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004680 const struct sched_class *prev_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004681
4682 BUG_ON(prio < 0 || prio > MAX_PRIO);
4683
4684 rq = task_rq_lock(p, &flags);
4685
Steven Rostedta8027072010-09-20 15:13:34 -04004686 trace_sched_pi_setprio(p, prio);
Andrew Mortond5f9f942007-05-08 20:27:06 -07004687 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004688 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02004689 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01004690 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004691 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004692 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004693 if (running)
4694 p->sched_class->put_prev_task(rq, p);
Ingo Molnardd41f592007-07-09 18:51:59 +02004695
4696 if (rt_prio(prio))
4697 p->sched_class = &rt_sched_class;
4698 else
4699 p->sched_class = &fair_sched_class;
4700
Ingo Molnarb29739f2006-06-27 02:54:51 -07004701 p->prio = prio;
4702
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07004703 if (running)
4704 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004705 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004706 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004707
Peter Zijlstrada7a7352011-01-17 17:03:27 +01004708 check_class_changed(rq, p, prev_class, oldprio);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004709 task_rq_unlock(rq, &flags);
4710}
4711
4712#endif
4713
Ingo Molnar36c8b582006-07-03 00:25:41 -07004714void set_user_nice(struct task_struct *p, long nice)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715{
Ingo Molnardd41f592007-07-09 18:51:59 +02004716 int old_prio, delta, on_rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004718 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719
4720 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4721 return;
4722 /*
4723 * We have to be careful, if called from sys_setpriority(),
4724 * the task might be in the middle of scheduling on another CPU.
4725 */
4726 rq = task_rq_lock(p, &flags);
4727 /*
4728 * The RT priorities are set via sched_setscheduler(), but we still
4729 * allow the 'normal' nice value to be set - but as expected
4730 * it wont have any effect on scheduling until the task is
Ingo Molnardd41f592007-07-09 18:51:59 +02004731 * SCHED_FIFO/SCHED_RR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732 */
Ingo Molnare05606d2007-07-09 18:51:59 +02004733 if (task_has_rt_policy(p)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 p->static_prio = NICE_TO_PRIO(nice);
4735 goto out_unlock;
4736 }
Ingo Molnardd41f592007-07-09 18:51:59 +02004737 on_rq = p->se.on_rq;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02004738 if (on_rq)
Ingo Molnar69be72c2007-08-09 11:16:49 +02004739 dequeue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 p->static_prio = NICE_TO_PRIO(nice);
Peter Williams2dd73a42006-06-27 02:54:34 -07004742 set_load_weight(p);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004743 old_prio = p->prio;
4744 p->prio = effective_prio(p);
4745 delta = p->prio - old_prio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746
Ingo Molnardd41f592007-07-09 18:51:59 +02004747 if (on_rq) {
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004748 enqueue_task(rq, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 /*
Andrew Mortond5f9f942007-05-08 20:27:06 -07004750 * If the task increased its priority or is running and
4751 * lowered its priority, then reschedule its CPU:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 */
Andrew Mortond5f9f942007-05-08 20:27:06 -07004753 if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 resched_task(rq->curr);
4755 }
4756out_unlock:
4757 task_rq_unlock(rq, &flags);
4758}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759EXPORT_SYMBOL(set_user_nice);
4760
Matt Mackalle43379f2005-05-01 08:59:00 -07004761/*
4762 * can_nice - check if a task can reduce its nice value
4763 * @p: task
4764 * @nice: nice value
4765 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004766int can_nice(const struct task_struct *p, const int nice)
Matt Mackalle43379f2005-05-01 08:59:00 -07004767{
Matt Mackall024f4742005-08-18 11:24:19 -07004768 /* convert nice value [19,-20] to rlimit style value [1,40] */
4769 int nice_rlim = 20 - nice;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004770
Jiri Slaby78d7d402010-03-05 13:42:54 -08004771 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
Matt Mackalle43379f2005-05-01 08:59:00 -07004772 capable(CAP_SYS_NICE));
4773}
4774
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775#ifdef __ARCH_WANT_SYS_NICE
4776
4777/*
4778 * sys_nice - change the priority of the current process.
4779 * @increment: priority increment
4780 *
4781 * sys_setpriority is a more generic, but much slower function that
4782 * does similar things.
4783 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004784SYSCALL_DEFINE1(nice, int, increment)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785{
Ingo Molnar48f24c42006-07-03 00:25:40 -07004786 long nice, retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004787
4788 /*
4789 * Setpriority might change our priority at the same moment.
4790 * We don't have to worry. Conceptually one call occurs first
4791 * and we have a single winner.
4792 */
Matt Mackalle43379f2005-05-01 08:59:00 -07004793 if (increment < -40)
4794 increment = -40;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 if (increment > 40)
4796 increment = 40;
4797
Américo Wang2b8f8362009-02-16 18:54:21 +08004798 nice = TASK_NICE(current) + increment;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799 if (nice < -20)
4800 nice = -20;
4801 if (nice > 19)
4802 nice = 19;
4803
Matt Mackalle43379f2005-05-01 08:59:00 -07004804 if (increment < 0 && !can_nice(current, nice))
4805 return -EPERM;
4806
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807 retval = security_task_setnice(current, nice);
4808 if (retval)
4809 return retval;
4810
4811 set_user_nice(current, nice);
4812 return 0;
4813}
4814
4815#endif
4816
4817/**
4818 * task_prio - return the priority value of a given task.
4819 * @p: the task in question.
4820 *
4821 * This is the priority value as seen by users in /proc.
4822 * RT tasks are offset by -200. Normal tasks are centered
4823 * around 0, value goes from -16 to +15.
4824 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004825int task_prio(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826{
4827 return p->prio - MAX_RT_PRIO;
4828}
4829
4830/**
4831 * task_nice - return the nice value of a given task.
4832 * @p: the task in question.
4833 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004834int task_nice(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004835{
4836 return TASK_NICE(p);
4837}
Pavel Roskin150d8be2008-03-05 16:56:37 -05004838EXPORT_SYMBOL(task_nice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004839
4840/**
4841 * idle_cpu - is a given cpu idle currently?
4842 * @cpu: the processor in question.
4843 */
4844int idle_cpu(int cpu)
4845{
4846 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4847}
4848
Linus Torvalds1da177e2005-04-16 15:20:36 -07004849/**
4850 * idle_task - return the idle task for a given cpu.
4851 * @cpu: the processor in question.
4852 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07004853struct task_struct *idle_task(int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854{
4855 return cpu_rq(cpu)->idle;
4856}
4857
4858/**
4859 * find_process_by_pid - find a process with a matching PID value.
4860 * @pid: the pid in question.
4861 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02004862static struct task_struct *find_process_by_pid(pid_t pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863{
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07004864 return pid ? find_task_by_vpid(pid) : current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865}
4866
4867/* Actually do priority change: must hold rq lock. */
Ingo Molnardd41f592007-07-09 18:51:59 +02004868static void
4869__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870{
Ingo Molnardd41f592007-07-09 18:51:59 +02004871 BUG_ON(p->se.on_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07004872
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873 p->policy = policy;
4874 p->rt_priority = prio;
Ingo Molnarb29739f2006-06-27 02:54:51 -07004875 p->normal_prio = normal_prio(p);
4876 /* we are holding p->pi_lock already */
4877 p->prio = rt_mutex_getprio(p);
Peter Zijlstraffd44db2009-11-10 20:12:01 +01004878 if (rt_prio(p->prio))
4879 p->sched_class = &rt_sched_class;
4880 else
4881 p->sched_class = &fair_sched_class;
Peter Williams2dd73a42006-06-27 02:54:34 -07004882 set_load_weight(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883}
4884
David Howellsc69e8d92008-11-14 10:39:19 +11004885/*
4886 * check the target process has a UID that matches the current process's
4887 */
4888static bool check_same_owner(struct task_struct *p)
4889{
4890 const struct cred *cred = current_cred(), *pcred;
4891 bool match;
4892
4893 rcu_read_lock();
4894 pcred = __task_cred(p);
Serge E. Hallynb0e77592011-03-23 16:43:24 -07004895 if (cred->user->user_ns == pcred->user->user_ns)
4896 match = (cred->euid == pcred->euid ||
4897 cred->euid == pcred->uid);
4898 else
4899 match = false;
David Howellsc69e8d92008-11-14 10:39:19 +11004900 rcu_read_unlock();
4901 return match;
4902}
4903
Rusty Russell961ccdd2008-06-23 13:55:38 +10004904static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07004905 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004907 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01004909 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004910 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004911 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912
Steven Rostedt66e53932006-06-27 02:54:44 -07004913 /* may grab non-irq protected spin_locks */
4914 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915recheck:
4916 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02004917 if (policy < 0) {
4918 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004920 } else {
4921 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4922 policy &= ~SCHED_RESET_ON_FORK;
4923
4924 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4925 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4926 policy != SCHED_IDLE)
4927 return -EINVAL;
4928 }
4929
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 /*
4931 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02004932 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4933 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004934 */
4935 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07004936 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04004937 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02004939 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940 return -EINVAL;
4941
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004942 /*
4943 * Allow unprivileged RT tasks to decrease priority:
4944 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10004945 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02004946 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02004947 unsigned long rlim_rtprio =
4948 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07004949
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004950 /* can't set/change the rt policy */
4951 if (policy != p->policy && !rlim_rtprio)
4952 return -EPERM;
4953
4954 /* can't increase priority */
4955 if (param->sched_priority > p->rt_priority &&
4956 param->sched_priority > rlim_rtprio)
4957 return -EPERM;
4958 }
Darren Hartc02aa732011-02-17 15:37:07 -08004959
Ingo Molnardd41f592007-07-09 18:51:59 +02004960 /*
Darren Hartc02aa732011-02-17 15:37:07 -08004961 * Treat SCHED_IDLE as nice 20. Only allow a switch to
4962 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
Ingo Molnardd41f592007-07-09 18:51:59 +02004963 */
Darren Hartc02aa732011-02-17 15:37:07 -08004964 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4965 if (!can_nice(p, TASK_NICE(p)))
4966 return -EPERM;
4967 }
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07004968
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004969 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11004970 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004971 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02004972
4973 /* Normal users shall not reset the sched_reset_on_fork flag */
4974 if (p->sched_reset_on_fork && !reset_on_fork)
4975 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07004976 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004978 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09004979 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07004980 if (retval)
4981 return retval;
4982 }
4983
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07004985 * make sure no PI-waiters arrive (or leave) while we are
4986 * changing the priority of the task:
4987 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01004988 raw_spin_lock_irqsave(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07004989 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 * To be able to change p->policy safely, the apropriate
4991 * runqueue lock must be held.
4992 */
Ingo Molnarb29739f2006-06-27 02:54:51 -07004993 rq = __task_rq_lock(p);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02004994
Peter Zijlstra34f971f2010-09-22 13:53:15 +02004995 /*
4996 * Changing the policy of the stop threads its a very bad idea
4997 */
4998 if (p == rq->stop) {
4999 __task_rq_unlock(rq);
5000 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5001 return -EINVAL;
5002 }
5003
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005004#ifdef CONFIG_RT_GROUP_SCHED
5005 if (user) {
5006 /*
5007 * Do not allow realtime tasks into groups that have no runtime
5008 * assigned.
5009 */
5010 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01005011 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5012 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02005013 __task_rq_unlock(rq);
5014 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5015 return -EPERM;
5016 }
5017 }
5018#endif
5019
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020 /* recheck policy now with rq lock held */
5021 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5022 policy = oldpolicy = -1;
Ingo Molnarb29739f2006-06-27 02:54:51 -07005023 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01005024 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 goto recheck;
5026 }
Ingo Molnardd41f592007-07-09 18:51:59 +02005027 on_rq = p->se.on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01005028 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005029 if (on_rq)
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005030 deactivate_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005031 if (running)
5032 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005033
Lennart Poetteringca94c442009-06-15 17:17:47 +02005034 p->sched_reset_on_fork = reset_on_fork;
5035
Linus Torvalds1da177e2005-04-16 15:20:36 -07005036 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01005037 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02005038 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02005039
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07005040 if (running)
5041 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005042 if (on_rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02005043 activate_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005044
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005045 check_class_changed(rq, p, prev_class, oldprio);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005046 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01005047 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07005048
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07005049 rt_mutex_adjust_pi(p);
5050
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051 return 0;
5052}
Rusty Russell961ccdd2008-06-23 13:55:38 +10005053
5054/**
5055 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5056 * @p: the task in question.
5057 * @policy: new policy.
5058 * @param: structure containing the new RT priority.
5059 *
5060 * NOTE that the task may be already dead.
5061 */
5062int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005063 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005064{
5065 return __sched_setscheduler(p, policy, param, true);
5066}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067EXPORT_SYMBOL_GPL(sched_setscheduler);
5068
Rusty Russell961ccdd2008-06-23 13:55:38 +10005069/**
5070 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5071 * @p: the task in question.
5072 * @policy: new policy.
5073 * @param: structure containing the new RT priority.
5074 *
5075 * Just like sched_setscheduler, only don't bother checking if the
5076 * current context has permission. For example, this is needed in
5077 * stop_machine(): we create temporary high priority worker threads,
5078 * but our caller might not have that capability.
5079 */
5080int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07005081 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10005082{
5083 return __sched_setscheduler(p, policy, param, false);
5084}
5085
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07005086static int
5087do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005089 struct sched_param lparam;
5090 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005091 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005092
5093 if (!param || pid < 0)
5094 return -EINVAL;
5095 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5096 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005097
5098 rcu_read_lock();
5099 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07005101 if (p != NULL)
5102 retval = sched_setscheduler(p, policy, &lparam);
5103 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07005104
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105 return retval;
5106}
5107
5108/**
5109 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5110 * @pid: the pid in question.
5111 * @policy: new policy.
5112 * @param: structure containing the new RT priority.
5113 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005114SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5115 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116{
Jason Baronc21761f2006-01-18 17:43:03 -08005117 /* negative values for policy are not valid */
5118 if (policy < 0)
5119 return -EINVAL;
5120
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 return do_sched_setscheduler(pid, policy, param);
5122}
5123
5124/**
5125 * sys_sched_setparam - set/change the RT priority of a thread
5126 * @pid: the pid in question.
5127 * @param: structure containing the new RT priority.
5128 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005129SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130{
5131 return do_sched_setscheduler(pid, -1, param);
5132}
5133
5134/**
5135 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5136 * @pid: the pid in question.
5137 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005138SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005140 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005141 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142
5143 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005144 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145
5146 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005147 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 p = find_process_by_pid(pid);
5149 if (p) {
5150 retval = security_task_getscheduler(p);
5151 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02005152 retval = p->policy
5153 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005155 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156 return retval;
5157}
5158
5159/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02005160 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 * @pid: the pid in question.
5162 * @param: structure containing the RT priority.
5163 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005164SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165{
5166 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005167 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005168 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169
5170 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005171 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005173 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 p = find_process_by_pid(pid);
5175 retval = -ESRCH;
5176 if (!p)
5177 goto out_unlock;
5178
5179 retval = security_task_getscheduler(p);
5180 if (retval)
5181 goto out_unlock;
5182
5183 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005184 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
5186 /*
5187 * This one might sleep, we cannot do it with a spinlock held ...
5188 */
5189 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5190
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 return retval;
5192
5193out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00005194 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195 return retval;
5196}
5197
Rusty Russell96f874e2008-11-25 02:35:14 +10305198long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305200 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005201 struct task_struct *p;
5202 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005204 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005205 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206
5207 p = find_process_by_pid(pid);
5208 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005209 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005210 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 return -ESRCH;
5212 }
5213
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005214 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005216 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305218 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5219 retval = -ENOMEM;
5220 goto out_put_task;
5221 }
5222 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5223 retval = -ENOMEM;
5224 goto out_free_cpus_allowed;
5225 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 retval = -EPERM;
Serge E. Hallynb0e77592011-03-23 16:43:24 -07005227 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 goto out_unlock;
5229
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09005230 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07005231 if (retval)
5232 goto out_unlock;
5233
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305234 cpuset_cpus_allowed(p, cpus_allowed);
5235 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02005236again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305237 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238
Paul Menage8707d8b2007-10-18 23:40:22 -07005239 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305240 cpuset_cpus_allowed(p, cpus_allowed);
5241 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07005242 /*
5243 * We must have raced with a concurrent cpuset
5244 * update. Just reset the cpus_allowed to the
5245 * cpuset's cpus_allowed
5246 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305247 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07005248 goto again;
5249 }
5250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305252 free_cpumask_var(new_mask);
5253out_free_cpus_allowed:
5254 free_cpumask_var(cpus_allowed);
5255out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005257 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 return retval;
5259}
5260
5261static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10305262 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263{
Rusty Russell96f874e2008-11-25 02:35:14 +10305264 if (len < cpumask_size())
5265 cpumask_clear(new_mask);
5266 else if (len > cpumask_size())
5267 len = cpumask_size();
5268
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5270}
5271
5272/**
5273 * sys_sched_setaffinity - set the cpu affinity of a process
5274 * @pid: pid of the process
5275 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5276 * @user_mask_ptr: user-space pointer to the new cpu mask
5277 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005278SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5279 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005280{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305281 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 int retval;
5283
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305284 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5285 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286
Rusty Russell5a16f3d2008-11-25 02:35:11 +10305287 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5288 if (retval == 0)
5289 retval = sched_setaffinity(pid, new_mask);
5290 free_cpumask_var(new_mask);
5291 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292}
5293
Rusty Russell96f874e2008-11-25 02:35:14 +10305294long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005296 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00005297 unsigned long flags;
5298 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005301 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005302 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005303
5304 retval = -ESRCH;
5305 p = find_process_by_pid(pid);
5306 if (!p)
5307 goto out_unlock;
5308
David Quigleye7834f82006-06-23 02:03:59 -07005309 retval = security_task_getscheduler(p);
5310 if (retval)
5311 goto out_unlock;
5312
Thomas Gleixner31605682009-12-08 20:24:16 +00005313 rq = task_rq_lock(p, &flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10305314 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Thomas Gleixner31605682009-12-08 20:24:16 +00005315 task_rq_unlock(rq, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316
5317out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00005318 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01005319 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320
Ulrich Drepper9531b622007-08-09 11:16:46 +02005321 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322}
5323
5324/**
5325 * sys_sched_getaffinity - get the cpu affinity of a process
5326 * @pid: pid of the process
5327 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5328 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5329 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005330SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5331 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332{
5333 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10305334 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335
Anton Blanchard84fba5e2010-04-06 17:02:19 +10005336 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005337 return -EINVAL;
5338 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339 return -EINVAL;
5340
Rusty Russellf17c8602008-11-25 02:35:11 +10305341 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5342 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343
Rusty Russellf17c8602008-11-25 02:35:11 +10305344 ret = sched_getaffinity(pid, mask);
5345 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09005346 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005347
5348 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10305349 ret = -EFAULT;
5350 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09005351 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10305352 }
5353 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005354
Rusty Russellf17c8602008-11-25 02:35:11 +10305355 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005356}
5357
5358/**
5359 * sys_sched_yield - yield the current processor to other threads.
5360 *
Ingo Molnardd41f592007-07-09 18:51:59 +02005361 * This function yields the current CPU to other tasks. If there are no
5362 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005364SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005366 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367
Ingo Molnar2d723762007-10-15 17:00:12 +02005368 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02005369 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370
5371 /*
5372 * Since we are going to call schedule() anyway, there's
5373 * no need to preempt or enable interrupts:
5374 */
5375 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07005376 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01005377 do_raw_spin_unlock(&rq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378 preempt_enable_no_resched();
5379
5380 schedule();
5381
5382 return 0;
5383}
5384
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005385static inline int should_resched(void)
5386{
5387 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5388}
5389
Andrew Mortone7b38402006-06-30 01:56:00 -07005390static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02005392 add_preempt_count(PREEMPT_ACTIVE);
5393 schedule();
5394 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395}
5396
Herbert Xu02b67cc32008-01-25 21:08:28 +01005397int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005399 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005400 __cond_resched();
5401 return 1;
5402 }
5403 return 0;
5404}
Herbert Xu02b67cc32008-01-25 21:08:28 +01005405EXPORT_SYMBOL(_cond_resched);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406
5407/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005408 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409 * call schedule, and on return reacquire the lock.
5410 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005411 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412 * operations here to prevent schedule() from being called twice (once via
5413 * spin_unlock(), once by hand).
5414 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005415int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005417 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07005418 int ret = 0;
5419
Peter Zijlstraf607c662009-07-20 19:16:29 +02005420 lockdep_assert_held(lock);
5421
Nick Piggin95c354f2008-01-30 13:31:20 +01005422 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005424 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01005425 __cond_resched();
5426 else
5427 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07005428 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430 }
Jan Kara6df3cec2005-06-13 15:52:32 -07005431 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005433EXPORT_SYMBOL(__cond_resched_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005434
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005435int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436{
5437 BUG_ON(!in_softirq());
5438
Peter Zijlstrad86ee482009-07-10 14:57:57 +02005439 if (should_resched()) {
Thomas Gleixner98d825672007-05-23 13:58:18 -07005440 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005441 __cond_resched();
5442 local_bh_disable();
5443 return 1;
5444 }
5445 return 0;
5446}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02005447EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449/**
5450 * yield - yield the current processor to other threads.
5451 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08005452 * This is a shortcut for kernel-space yielding - it marks the
Linus Torvalds1da177e2005-04-16 15:20:36 -07005453 * thread runnable and calls sys_sched_yield().
5454 */
5455void __sched yield(void)
5456{
5457 set_current_state(TASK_RUNNING);
5458 sys_sched_yield();
5459}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460EXPORT_SYMBOL(yield);
5461
Mike Galbraithd95f4122011-02-01 09:50:51 -05005462/**
5463 * yield_to - yield the current processor to another thread in
5464 * your thread group, or accelerate that thread toward the
5465 * processor it's on.
5466 *
5467 * It's the caller's job to ensure that the target task struct
5468 * can't go away on us before we can do any checks.
5469 *
5470 * Returns true if we indeed boosted the target task.
5471 */
5472bool __sched yield_to(struct task_struct *p, bool preempt)
5473{
5474 struct task_struct *curr = current;
5475 struct rq *rq, *p_rq;
5476 unsigned long flags;
5477 bool yielded = 0;
5478
5479 local_irq_save(flags);
5480 rq = this_rq();
5481
5482again:
5483 p_rq = task_rq(p);
5484 double_rq_lock(rq, p_rq);
5485 while (task_rq(p) != p_rq) {
5486 double_rq_unlock(rq, p_rq);
5487 goto again;
5488 }
5489
5490 if (!curr->sched_class->yield_to_task)
5491 goto out;
5492
5493 if (curr->sched_class != p->sched_class)
5494 goto out;
5495
5496 if (task_running(p_rq, p) || p->state)
5497 goto out;
5498
5499 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005500 if (yielded) {
Mike Galbraithd95f4122011-02-01 09:50:51 -05005501 schedstat_inc(rq, yld_count);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08005502 /*
5503 * Make p's CPU reschedule; pick_next_entity takes care of
5504 * fairness.
5505 */
5506 if (preempt && rq != p_rq)
5507 resched_task(p_rq->curr);
5508 }
Mike Galbraithd95f4122011-02-01 09:50:51 -05005509
5510out:
5511 double_rq_unlock(rq, p_rq);
5512 local_irq_restore(flags);
5513
5514 if (yielded)
5515 schedule();
5516
5517 return yielded;
5518}
5519EXPORT_SYMBOL_GPL(yield_to);
5520
Linus Torvalds1da177e2005-04-16 15:20:36 -07005521/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005522 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524 */
5525void __sched io_schedule(void)
5526{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005527 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005529 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005530 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005531 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005532 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005533 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005535 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005537EXPORT_SYMBOL(io_schedule);
5538
5539long __sched io_schedule_timeout(long timeout)
5540{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09005541 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005542 long ret;
5543
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005544 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545 atomic_inc(&rq->nr_iowait);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005546 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07005548 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07005550 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005551 return ret;
5552}
5553
5554/**
5555 * sys_sched_get_priority_max - return maximum RT priority.
5556 * @policy: scheduling class.
5557 *
5558 * this syscall returns the maximum rt_priority that can be used
5559 * by a given scheduling class.
5560 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005561SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005562{
5563 int ret = -EINVAL;
5564
5565 switch (policy) {
5566 case SCHED_FIFO:
5567 case SCHED_RR:
5568 ret = MAX_USER_RT_PRIO-1;
5569 break;
5570 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005571 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005572 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573 ret = 0;
5574 break;
5575 }
5576 return ret;
5577}
5578
5579/**
5580 * sys_sched_get_priority_min - return minimum RT priority.
5581 * @policy: scheduling class.
5582 *
5583 * this syscall returns the minimum rt_priority that can be used
5584 * by a given scheduling class.
5585 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01005586SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587{
5588 int ret = -EINVAL;
5589
5590 switch (policy) {
5591 case SCHED_FIFO:
5592 case SCHED_RR:
5593 ret = 1;
5594 break;
5595 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08005596 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02005597 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005598 ret = 0;
5599 }
5600 return ret;
5601}
5602
5603/**
5604 * sys_sched_rr_get_interval - return the default timeslice of a process.
5605 * @pid: pid of the process.
5606 * @interval: userspace pointer to the timeslice value.
5607 *
5608 * this syscall writes the default timeslice value of a given process
5609 * into the user-space timespec buffer. A value of '0' means infinity.
5610 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01005611SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01005612 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005613{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005614 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005615 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005616 unsigned long flags;
5617 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005618 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005619 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005620
5621 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02005622 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005623
5624 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005625 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005626 p = find_process_by_pid(pid);
5627 if (!p)
5628 goto out_unlock;
5629
5630 retval = security_task_getscheduler(p);
5631 if (retval)
5632 goto out_unlock;
5633
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01005634 rq = task_rq_lock(p, &flags);
5635 time_slice = p->sched_class->get_rr_interval(rq, p);
5636 task_rq_unlock(rq, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005637
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005638 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02005639 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02005642
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00005644 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645 return retval;
5646}
5647
Steven Rostedt7c731e02008-05-12 21:20:41 +02005648static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005649
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005650void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005651{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005652 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07005653 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08005656 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07005657 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02005658#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005660 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005662 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005663#else
5664 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005665 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005667 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668#endif
5669#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05005670 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005672 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
David Rientjesaa47b7e2009-05-04 01:38:05 -07005673 task_pid_nr(p), task_pid_nr(p->real_parent),
5674 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01005676 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677}
5678
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005679void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680{
Ingo Molnar36c8b582006-07-03 00:25:41 -07005681 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005682
Ingo Molnar4bd77322007-07-11 21:21:47 +02005683#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005684 printk(KERN_INFO
5685 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005687 printk(KERN_INFO
5688 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689#endif
5690 read_lock(&tasklist_lock);
5691 do_each_thread(g, p) {
5692 /*
5693 * reset the NMI-timeout, listing all files on a slow
5694 * console might take alot of time:
5695 */
5696 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07005697 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01005698 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005699 } while_each_thread(g, p);
5700
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07005701 touch_all_softlockup_watchdogs();
5702
Ingo Molnardd41f592007-07-09 18:51:59 +02005703#ifdef CONFIG_SCHED_DEBUG
5704 sysrq_sched_debug_show();
5705#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005706 read_unlock(&tasklist_lock);
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005707 /*
5708 * Only show locks if all tasks are dumped:
5709 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02005710 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08005711 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712}
5713
Ingo Molnar1df21052007-07-09 18:51:58 +02005714void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5715{
Ingo Molnardd41f592007-07-09 18:51:59 +02005716 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02005717}
5718
Ingo Molnarf340c0d2005-06-28 16:40:42 +02005719/**
5720 * init_idle - set up an idle thread for a given CPU
5721 * @idle: task in question
5722 * @cpu: cpu the idle task belongs to
5723 *
5724 * NOTE: this function does not set the idle thread's NEED_RESCHED
5725 * flag, to make booting more robust.
5726 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07005727void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005728{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005729 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730 unsigned long flags;
5731
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005732 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01005733
Ingo Molnardd41f592007-07-09 18:51:59 +02005734 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01005735 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02005736 idle->se.exec_start = sched_clock();
5737
Rusty Russell96f874e2008-11-25 02:35:14 +10305738 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005739 /*
5740 * We're having a chicken and egg problem, even though we are
5741 * holding rq->lock, the cpu isn't yet set to this cpu so the
5742 * lockdep check in task_group() will fail.
5743 *
5744 * Similar case to sched_fork(). / Alternatively we could
5745 * use task_rq_lock() here and obtain the other rq->lock.
5746 *
5747 * Silence PROVE_RCU
5748 */
5749 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02005750 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02005751 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005752
Linus Torvalds1da177e2005-04-16 15:20:36 -07005753 rq->curr = rq->idle = idle;
Nick Piggin4866cde2005-06-25 14:57:23 -07005754#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5755 idle->oncpu = 1;
5756#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005757 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005758
5759 /* Set the preempt count _outside_ the spinlocks! */
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005760#if defined(CONFIG_PREEMPT)
5761 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5762#else
Al Viroa1261f52005-11-13 16:06:55 -08005763 task_thread_info(idle)->preempt_count = 0;
Linus Torvalds8e3e0762008-05-10 20:58:02 -07005764#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02005765 /*
5766 * The idle tasks have their own, simple scheduling class:
5767 */
5768 idle->sched_class = &idle_sched_class;
Steven Rostedt868baf02011-02-10 21:26:13 -05005769 ftrace_graph_init_idle_task(idle, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770}
5771
5772/*
5773 * In a system that switches off the HZ timer nohz_cpu_mask
5774 * indicates which cpus entered this state. This is used
5775 * in the rcu update to wait only for active cpus. For system
5776 * which do not switch off the HZ timer nohz_cpu_mask should
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305777 * always be CPU_BITS_NONE.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778 */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10305779cpumask_var_t nohz_cpu_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780
Ingo Molnar19978ca2007-11-09 22:39:38 +01005781/*
5782 * Increase the granularity value when there are more CPUs,
5783 * because with more CPUs the 'effective latency' as visible
5784 * to users decreases. But the relationship is not linear,
5785 * so pick a second-best guess by going with the log2 of the
5786 * number of CPUs.
5787 *
5788 * This idea comes from the SD scheduler of Con Kolivas:
5789 */
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005790static int get_update_sysctl_factor(void)
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005791{
Mike Galbraith4ca3ef72009-12-10 09:25:53 +01005792 unsigned int cpus = min_t(int, num_online_cpus(), 8);
Christian Ehrhardt1983a922009-11-30 12:16:47 +01005793 unsigned int factor;
5794
5795 switch (sysctl_sched_tunable_scaling) {
5796 case SCHED_TUNABLESCALING_NONE:
5797 factor = 1;
5798 break;
5799 case SCHED_TUNABLESCALING_LINEAR:
5800 factor = cpus;
5801 break;
5802 case SCHED_TUNABLESCALING_LOG:
5803 default:
5804 factor = 1 + ilog2(cpus);
5805 break;
5806 }
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005807
Christian Ehrhardtacb4a842009-11-30 12:16:48 +01005808 return factor;
5809}
5810
5811static void update_sysctl(void)
5812{
5813 unsigned int factor = get_update_sysctl_factor();
5814
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005815#define SET_SYSCTL(name) \
5816 (sysctl_##name = (factor) * normalized_sysctl_##name)
5817 SET_SYSCTL(sched_min_granularity);
5818 SET_SYSCTL(sched_latency);
5819 SET_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005820#undef SET_SYSCTL
5821}
5822
Ingo Molnar19978ca2007-11-09 22:39:38 +01005823static inline void sched_init_granularity(void)
5824{
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005825 update_sysctl();
Ingo Molnar19978ca2007-11-09 22:39:38 +01005826}
5827
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828#ifdef CONFIG_SMP
5829/*
5830 * This is how migration works:
5831 *
Tejun Heo969c7922010-05-06 18:49:21 +02005832 * 1) we invoke migration_cpu_stop() on the target CPU using
5833 * stop_one_cpu().
5834 * 2) stopper starts to run (implicitly forcing the migrated thread
5835 * off the CPU)
5836 * 3) it checks whether the migrated task is still in the wrong runqueue.
5837 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005838 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02005839 * 5) stopper completes and stop_one_cpu() returns and the migration
5840 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005841 */
5842
5843/*
5844 * Change a given task's CPU affinity. Migrate the thread to a
5845 * proper CPU and schedule it away if the CPU it's executing on
5846 * is removed from the allowed bitmask.
5847 *
5848 * NOTE: the caller must have a valid reference to the task, the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005849 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850 * call is not atomic; no spinlocks may be held.
5851 */
Rusty Russell96f874e2008-11-25 02:35:14 +10305852int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005853{
5854 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07005855 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02005856 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005857 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858
Peter Zijlstra65cc8e42010-03-25 21:05:16 +01005859 /*
5860 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5861 * drop the rq->lock and still rely on ->cpus_allowed.
5862 */
5863again:
5864 while (task_is_waking(p))
5865 cpu_relax();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005866 rq = task_rq_lock(p, &flags);
Peter Zijlstra65cc8e42010-03-25 21:05:16 +01005867 if (task_is_waking(p)) {
5868 task_rq_unlock(rq, &flags);
5869 goto again;
5870 }
Peter Zijlstrae2912002009-12-16 18:04:36 +01005871
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01005872 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005873 ret = -EINVAL;
5874 goto out;
5875 }
5876
David Rientjes9985b0b2008-06-05 12:57:11 -07005877 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
Rusty Russell96f874e2008-11-25 02:35:14 +10305878 !cpumask_equal(&p->cpus_allowed, new_mask))) {
David Rientjes9985b0b2008-06-05 12:57:11 -07005879 ret = -EINVAL;
5880 goto out;
5881 }
5882
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005883 if (p->sched_class->set_cpus_allowed)
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005884 p->sched_class->set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005885 else {
Rusty Russell96f874e2008-11-25 02:35:14 +10305886 cpumask_copy(&p->cpus_allowed, new_mask);
5887 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01005888 }
5889
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10305891 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892 goto out;
5893
Tejun Heo969c7922010-05-06 18:49:21 +02005894 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Nikanth Karthikesanb7a2b392010-11-26 12:37:09 +05305895 if (migrate_task(p, rq)) {
Tejun Heo969c7922010-05-06 18:49:21 +02005896 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07005897 /* Need help from migration thread: drop lock and wait. */
5898 task_rq_unlock(rq, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005899 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005900 tlb_migrate_finish(p->mm);
5901 return 0;
5902 }
5903out:
5904 task_rq_unlock(rq, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07005905
Linus Torvalds1da177e2005-04-16 15:20:36 -07005906 return ret;
5907}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07005908EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909
5910/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01005911 * Move (not current) task off this cpu, onto dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912 * this because either it can't run here any more (set_cpus_allowed()
5913 * away from this CPU, or CPU going down), or because we're
5914 * attempting to rebalance this task on exec (sched_exec).
5915 *
5916 * So we race with normal scheduler movements, but that's OK, as long
5917 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07005918 *
5919 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005920 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07005921static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005923 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01005924 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005925
Max Krasnyanskye761b772008-07-15 04:43:49 -07005926 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07005927 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005928
5929 rq_src = cpu_rq(src_cpu);
5930 rq_dest = cpu_rq(dest_cpu);
5931
5932 double_rq_lock(rq_src, rq_dest);
5933 /* Already moved. */
5934 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005935 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005936 /* Affinity changed (again). */
Rusty Russell96f874e2008-11-25 02:35:14 +10305937 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005938 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005939
Peter Zijlstrae2912002009-12-16 18:04:36 +01005940 /*
5941 * If we're not on a rq, the next wake-up will ensure we're
5942 * placed properly.
5943 */
5944 if (p->se.on_rq) {
Ingo Molnar2e1cb742007-08-09 11:16:49 +02005945 deactivate_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01005946 set_task_cpu(p, dest_cpu);
Ingo Molnardd41f592007-07-09 18:51:59 +02005947 activate_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02005948 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005950done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07005951 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07005952fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005953 double_rq_unlock(rq_src, rq_dest);
Kirill Korotaevefc30812006-06-27 02:54:32 -07005954 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005955}
5956
5957/*
Tejun Heo969c7922010-05-06 18:49:21 +02005958 * migration_cpu_stop - this will be executed by a highprio stopper thread
5959 * and performs thread migration by bumping thread off CPU then
5960 * 'pushing' onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005961 */
Tejun Heo969c7922010-05-06 18:49:21 +02005962static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963{
Tejun Heo969c7922010-05-06 18:49:21 +02005964 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005965
Tejun Heo969c7922010-05-06 18:49:21 +02005966 /*
5967 * The original target cpu might have gone down and we might
5968 * be on another cpu but it doesn't matter.
5969 */
5970 local_irq_disable();
5971 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5972 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973 return 0;
5974}
5975
5976#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977
Ingo Molnar48f24c42006-07-03 00:25:40 -07005978/*
5979 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980 * offline.
5981 */
5982void idle_task_exit(void)
5983{
5984 struct mm_struct *mm = current->active_mm;
5985
5986 BUG_ON(cpu_online(smp_processor_id()));
5987
5988 if (mm != &init_mm)
5989 switch_mm(mm, &init_mm, current);
5990 mmdrop(mm);
5991}
5992
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005993/*
5994 * While a dead CPU has no uninterruptible tasks queued at this point,
5995 * it might still have a nonzero ->nr_uninterruptible counter, because
5996 * for performance reasons the counter is not stricly tracking tasks to
5997 * their home CPUs. So we just add the counter to another CPU's counter,
5998 * to keep the global sum constant after CPU-down:
5999 */
6000static void migrate_nr_uninterruptible(struct rq *rq_src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006001{
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006002 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006004 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6005 rq_src->nr_uninterruptible = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006006}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006007
6008/*
6009 * remove the tasks which were accounted by rq from calc_load_tasks.
6010 */
6011static void calc_global_load_remove(struct rq *rq)
6012{
6013 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
Thomas Gleixnera468d382009-07-17 14:15:46 +02006014 rq->calc_load_active = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006015}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006016
6017/*
6018 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6019 * try_to_wake_up()->select_task_rq().
6020 *
6021 * Called with rq->lock held even though we'er in stop_machine() and
6022 * there's no concurrency possible, we hold the required locks anyway
6023 * because of lock validation efforts.
6024 */
6025static void migrate_tasks(unsigned int dead_cpu)
6026{
6027 struct rq *rq = cpu_rq(dead_cpu);
6028 struct task_struct *next, *stop = rq->stop;
6029 int dest_cpu;
6030
6031 /*
6032 * Fudge the rq selection such that the below task selection loop
6033 * doesn't get stuck on the currently eligible stop task.
6034 *
6035 * We're currently inside stop_machine() and the rq is either stuck
6036 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6037 * either way we should never end up calling schedule() until we're
6038 * done here.
6039 */
6040 rq->stop = NULL;
6041
6042 for ( ; ; ) {
6043 /*
6044 * There's this thread running, bail when that's the only
6045 * remaining thread.
6046 */
6047 if (rq->nr_running == 1)
6048 break;
6049
6050 next = pick_next_task(rq);
6051 BUG_ON(!next);
6052 next->sched_class->put_prev_task(rq, next);
6053
6054 /* Find suitable destination for @next, with force if needed. */
6055 dest_cpu = select_fallback_rq(dead_cpu, next);
6056 raw_spin_unlock(&rq->lock);
6057
6058 __migrate_task(next, dead_cpu, dest_cpu);
6059
6060 raw_spin_lock(&rq->lock);
6061 }
6062
6063 rq->stop = stop;
6064}
6065
Linus Torvalds1da177e2005-04-16 15:20:36 -07006066#endif /* CONFIG_HOTPLUG_CPU */
6067
Nick Piggine692ab52007-07-26 13:40:43 +02006068#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6069
6070static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006071 {
6072 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006073 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006074 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006075 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006076};
6077
6078static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02006079 {
6080 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006081 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02006082 .child = sd_ctl_dir,
6083 },
Eric W. Biederman56992302009-11-05 15:38:40 -08006084 {}
Nick Piggine692ab52007-07-26 13:40:43 +02006085};
6086
6087static struct ctl_table *sd_alloc_ctl_entry(int n)
6088{
6089 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02006090 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02006091
Nick Piggine692ab52007-07-26 13:40:43 +02006092 return entry;
6093}
6094
Milton Miller6382bc92007-10-15 17:00:19 +02006095static void sd_free_ctl_entry(struct ctl_table **tablep)
6096{
Milton Millercd7900762007-10-17 16:55:11 +02006097 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02006098
Milton Millercd7900762007-10-17 16:55:11 +02006099 /*
6100 * In the intermediate directories, both the child directory and
6101 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006102 * will always be set. In the lowest directory the names are
Milton Millercd7900762007-10-17 16:55:11 +02006103 * static strings and all have proc handlers.
6104 */
6105 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02006106 if (entry->child)
6107 sd_free_ctl_entry(&entry->child);
Milton Millercd7900762007-10-17 16:55:11 +02006108 if (entry->proc_handler == NULL)
6109 kfree(entry->procname);
6110 }
Milton Miller6382bc92007-10-15 17:00:19 +02006111
6112 kfree(*tablep);
6113 *tablep = NULL;
6114}
6115
Nick Piggine692ab52007-07-26 13:40:43 +02006116static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02006117set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02006118 const char *procname, void *data, int maxlen,
6119 mode_t mode, proc_handler *proc_handler)
6120{
Nick Piggine692ab52007-07-26 13:40:43 +02006121 entry->procname = procname;
6122 entry->data = data;
6123 entry->maxlen = maxlen;
6124 entry->mode = mode;
6125 entry->proc_handler = proc_handler;
6126}
6127
6128static struct ctl_table *
6129sd_alloc_ctl_domain_table(struct sched_domain *sd)
6130{
Ingo Molnara5d8c342008-10-09 11:35:51 +02006131 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02006132
Milton Millerad1cdc12007-10-15 17:00:19 +02006133 if (table == NULL)
6134 return NULL;
6135
Alexey Dobriyane0361852007-08-09 11:16:46 +02006136 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006137 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006138 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Nick Piggine692ab52007-07-26 13:40:43 +02006139 sizeof(long), 0644, proc_doulongvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006140 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006141 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006142 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006143 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006144 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006145 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006146 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006147 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006148 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Nick Piggine692ab52007-07-26 13:40:43 +02006149 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006150 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Nick Piggine692ab52007-07-26 13:40:43 +02006151 sizeof(int), 0644, proc_dointvec_minmax);
Alexey Dobriyane0361852007-08-09 11:16:46 +02006152 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Nick Piggine692ab52007-07-26 13:40:43 +02006153 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006154 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02006155 &sd->cache_nice_tries,
6156 sizeof(int), 0644, proc_dointvec_minmax);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02006157 set_table_entry(&table[10], "flags", &sd->flags,
Nick Piggine692ab52007-07-26 13:40:43 +02006158 sizeof(int), 0644, proc_dointvec_minmax);
Ingo Molnara5d8c342008-10-09 11:35:51 +02006159 set_table_entry(&table[11], "name", sd->name,
6160 CORENAME_MAX_SIZE, 0444, proc_dostring);
6161 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02006162
6163 return table;
6164}
6165
Ingo Molnar9a4e7152007-11-28 15:52:56 +01006166static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02006167{
6168 struct ctl_table *entry, *table;
6169 struct sched_domain *sd;
6170 int domain_num = 0, i;
6171 char buf[32];
6172
6173 for_each_domain(cpu, sd)
6174 domain_num++;
6175 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02006176 if (table == NULL)
6177 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02006178
6179 i = 0;
6180 for_each_domain(cpu, sd) {
6181 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006182 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006183 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006184 entry->child = sd_alloc_ctl_domain_table(sd);
6185 entry++;
6186 i++;
6187 }
6188 return table;
6189}
6190
6191static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02006192static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006193{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006194 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02006195 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6196 char buf[32];
6197
Milton Miller73785472007-10-24 18:23:48 +02006198 WARN_ON(sd_ctl_dir[0].child);
6199 sd_ctl_dir[0].child = entry;
6200
Milton Millerad1cdc12007-10-15 17:00:19 +02006201 if (entry == NULL)
6202 return;
6203
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006204 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02006205 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02006206 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02006207 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02006208 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02006209 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02006210 }
Milton Miller73785472007-10-24 18:23:48 +02006211
6212 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02006213 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6214}
Milton Miller6382bc92007-10-15 17:00:19 +02006215
Milton Miller73785472007-10-24 18:23:48 +02006216/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02006217static void unregister_sched_domain_sysctl(void)
6218{
Milton Miller73785472007-10-24 18:23:48 +02006219 if (sd_sysctl_header)
6220 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02006221 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02006222 if (sd_ctl_dir[0].child)
6223 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02006224}
Nick Piggine692ab52007-07-26 13:40:43 +02006225#else
Milton Miller6382bc92007-10-15 17:00:19 +02006226static void register_sched_domain_sysctl(void)
6227{
6228}
6229static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02006230{
6231}
6232#endif
6233
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006234static void set_rq_online(struct rq *rq)
6235{
6236 if (!rq->online) {
6237 const struct sched_class *class;
6238
Rusty Russellc6c49272008-11-25 02:35:05 +10306239 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006240 rq->online = 1;
6241
6242 for_each_class(class) {
6243 if (class->rq_online)
6244 class->rq_online(rq);
6245 }
6246 }
6247}
6248
6249static void set_rq_offline(struct rq *rq)
6250{
6251 if (rq->online) {
6252 const struct sched_class *class;
6253
6254 for_each_class(class) {
6255 if (class->rq_offline)
6256 class->rq_offline(rq);
6257 }
6258
Rusty Russellc6c49272008-11-25 02:35:05 +10306259 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006260 rq->online = 0;
6261 }
6262}
6263
Linus Torvalds1da177e2005-04-16 15:20:36 -07006264/*
6265 * migration_call - callback that gets triggered when a CPU is added.
6266 * Here we can start up the necessary migration thread for the new CPU.
6267 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006268static int __cpuinit
6269migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006270{
Ingo Molnar48f24c42006-07-03 00:25:40 -07006271 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006272 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02006273 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006274
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006275 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07006276
Linus Torvalds1da177e2005-04-16 15:20:36 -07006277 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02006278 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006279 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006280
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006282 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006283 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006284 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306285 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006286
6287 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04006288 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006289 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006290 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006291
Linus Torvalds1da177e2005-04-16 15:20:36 -07006292#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04006293 case CPU_DYING:
Gregory Haskins57d885f2008-01-25 21:08:18 +01006294 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006295 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006296 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306297 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006298 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006299 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006300 migrate_tasks(cpu);
6301 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006302 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01006303
6304 migrate_nr_uninterruptible(rq);
6305 calc_global_load_remove(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006306 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307#endif
6308 }
6309 return NOTIFY_OK;
6310}
6311
Paul Mackerrasf38b0822009-06-02 21:05:16 +10006312/*
6313 * Register at high priority so that task migration (migrate_all_tasks)
6314 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006315 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006316 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07006317static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006318 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02006319 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006320};
6321
Tejun Heo3a101d02010-06-08 21:40:36 +02006322static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6323 unsigned long action, void *hcpu)
6324{
6325 switch (action & ~CPU_TASKS_FROZEN) {
6326 case CPU_ONLINE:
6327 case CPU_DOWN_FAILED:
6328 set_cpu_active((long)hcpu, true);
6329 return NOTIFY_OK;
6330 default:
6331 return NOTIFY_DONE;
6332 }
6333}
6334
6335static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6336 unsigned long action, void *hcpu)
6337{
6338 switch (action & ~CPU_TASKS_FROZEN) {
6339 case CPU_DOWN_PREPARE:
6340 set_cpu_active((long)hcpu, false);
6341 return NOTIFY_OK;
6342 default:
6343 return NOTIFY_DONE;
6344 }
6345}
6346
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006347static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006348{
6349 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07006350 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006351
Tejun Heo3a101d02010-06-08 21:40:36 +02006352 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07006353 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6354 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006355 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6356 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006357
Tejun Heo3a101d02010-06-08 21:40:36 +02006358 /* Register cpu active notifiers */
6359 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6360 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6361
Thomas Gleixnera004cd42009-07-21 09:54:05 +02006362 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07006364early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006365#endif
6366
6367#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07006368
Ingo Molnar3e9830d2007-10-15 17:00:13 +02006369#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006370
Mike Travisf6630112009-11-17 18:22:15 -06006371static __read_mostly int sched_domain_debug_enabled;
6372
6373static int __init sched_domain_debug_setup(char *str)
6374{
6375 sched_domain_debug_enabled = 1;
6376
6377 return 0;
6378}
6379early_param("sched_debug", sched_domain_debug_setup);
6380
Mike Travis7c16ec52008-04-04 18:11:11 -07006381static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10306382 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006383{
6384 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07006385 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006386
Rusty Russell968ea6d2008-12-13 21:55:51 +10306387 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10306388 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006389
6390 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6391
6392 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006393 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006394 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006395 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6396 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006397 return -1;
6398 }
6399
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006400 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006401
Rusty Russell758b2cd2008-11-25 02:35:04 +10306402 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006403 printk(KERN_ERR "ERROR: domain->span does not contain "
6404 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006405 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10306406 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006407 printk(KERN_ERR "ERROR: domain->groups does not contain"
6408 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006409 }
6410
6411 printk(KERN_DEBUG "%*s groups:", level + 1, "");
6412 do {
6413 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006414 printk("\n");
6415 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006416 break;
6417 }
6418
Peter Zijlstra18a38852009-09-01 10:34:39 +02006419 if (!group->cpu_power) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006420 printk(KERN_CONT "\n");
6421 printk(KERN_ERR "ERROR: domain->cpu_power not "
6422 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006423 break;
6424 }
6425
Rusty Russell758b2cd2008-11-25 02:35:04 +10306426 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006427 printk(KERN_CONT "\n");
6428 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006429 break;
6430 }
6431
Rusty Russell758b2cd2008-11-25 02:35:04 +10306432 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006433 printk(KERN_CONT "\n");
6434 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006435 break;
6436 }
6437
Rusty Russell758b2cd2008-11-25 02:35:04 +10306438 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006439
Rusty Russell968ea6d2008-12-13 21:55:51 +10306440 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306441
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006442 printk(KERN_CONT " %s", str);
Peter Zijlstra18a38852009-09-01 10:34:39 +02006443 if (group->cpu_power != SCHED_LOAD_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006444 printk(KERN_CONT " (cpu_power = %d)",
6445 group->cpu_power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05306446 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006447
6448 group = group->next;
6449 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006450 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006451
Rusty Russell758b2cd2008-11-25 02:35:04 +10306452 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006453 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006454
Rusty Russell758b2cd2008-11-25 02:35:04 +10306455 if (sd->parent &&
6456 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006457 printk(KERN_ERR "ERROR: parent span is not a superset "
6458 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006459 return 0;
6460}
6461
Linus Torvalds1da177e2005-04-16 15:20:36 -07006462static void sched_domain_debug(struct sched_domain *sd, int cpu)
6463{
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306464 cpumask_var_t groupmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465 int level = 0;
6466
Mike Travisf6630112009-11-17 18:22:15 -06006467 if (!sched_domain_debug_enabled)
6468 return;
6469
Nick Piggin41c7ce92005-06-25 14:57:24 -07006470 if (!sd) {
6471 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6472 return;
6473 }
6474
Linus Torvalds1da177e2005-04-16 15:20:36 -07006475 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6476
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306477 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006478 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6479 return;
6480 }
6481
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006482 for (;;) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006483 if (sched_domain_debug_one(sd, cpu, level, groupmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006484 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006485 level++;
6486 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08006487 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02006488 break;
6489 }
Rusty Russelld5dd3db2008-11-25 02:35:12 +10306490 free_cpumask_var(groupmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006491}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006492#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07006493# define sched_domain_debug(sd, cpu) do { } while (0)
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006494#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006495
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006496static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006497{
Rusty Russell758b2cd2008-11-25 02:35:04 +10306498 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006499 return 1;
6500
6501 /* Following flags need at least 2 groups */
6502 if (sd->flags & (SD_LOAD_BALANCE |
6503 SD_BALANCE_NEWIDLE |
6504 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006505 SD_BALANCE_EXEC |
6506 SD_SHARE_CPUPOWER |
6507 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006508 if (sd->groups != sd->groups->next)
6509 return 0;
6510 }
6511
6512 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02006513 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006514 return 0;
6515
6516 return 1;
6517}
6518
Ingo Molnar48f24c42006-07-03 00:25:40 -07006519static int
6520sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07006521{
6522 unsigned long cflags = sd->flags, pflags = parent->flags;
6523
6524 if (sd_degenerate(parent))
6525 return 1;
6526
Rusty Russell758b2cd2008-11-25 02:35:04 +10306527 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07006528 return 0;
6529
Suresh Siddha245af2c2005-06-25 14:57:25 -07006530 /* Flags needing groups don't count if only 1 group in parent */
6531 if (parent->groups == parent->groups->next) {
6532 pflags &= ~(SD_LOAD_BALANCE |
6533 SD_BALANCE_NEWIDLE |
6534 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07006535 SD_BALANCE_EXEC |
6536 SD_SHARE_CPUPOWER |
6537 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08006538 if (nr_node_ids == 1)
6539 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006540 }
6541 if (~cflags & pflags)
6542 return 0;
6543
6544 return 1;
6545}
6546
Rusty Russellc6c49272008-11-25 02:35:05 +10306547static void free_rootdomain(struct root_domain *rd)
6548{
Peter Zijlstra047106a2009-11-16 10:28:09 +01006549 synchronize_sched();
6550
Rusty Russell68e74562008-11-25 02:35:13 +10306551 cpupri_cleanup(&rd->cpupri);
6552
Rusty Russellc6c49272008-11-25 02:35:05 +10306553 free_cpumask_var(rd->rto_mask);
6554 free_cpumask_var(rd->online);
6555 free_cpumask_var(rd->span);
6556 kfree(rd);
6557}
6558
Gregory Haskins57d885f2008-01-25 21:08:18 +01006559static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6560{
Ingo Molnara0490fa2009-02-12 11:35:40 +01006561 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006562 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006563
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006564 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006565
6566 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01006567 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006568
Rusty Russellc6c49272008-11-25 02:35:05 +10306569 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006570 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006571
Rusty Russellc6c49272008-11-25 02:35:05 +10306572 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01006573
Ingo Molnara0490fa2009-02-12 11:35:40 +01006574 /*
6575 * If we dont want to free the old_rt yet then
6576 * set old_rd to NULL to skip the freeing later
6577 * in this function:
6578 */
6579 if (!atomic_dec_and_test(&old_rd->refcount))
6580 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006581 }
6582
6583 atomic_inc(&rd->refcount);
6584 rq->rd = rd;
6585
Rusty Russellc6c49272008-11-25 02:35:05 +10306586 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04006587 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006588 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006589
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006590 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01006591
6592 if (old_rd)
6593 free_rootdomain(old_rd);
Gregory Haskins57d885f2008-01-25 21:08:18 +01006594}
6595
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006596static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006597{
6598 memset(rd, 0, sizeof(*rd));
6599
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006600 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08006601 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006602 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306603 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006604 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10306605 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02006606
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006607 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10306608 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10306609 return 0;
6610
Rusty Russell68e74562008-11-25 02:35:13 +10306611free_rto_mask:
6612 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10306613free_online:
6614 free_cpumask_var(rd->online);
6615free_span:
6616 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08006617out:
Rusty Russellc6c49272008-11-25 02:35:05 +10306618 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006619}
6620
6621static void init_defrootdomain(void)
6622{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006623 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10306624
Gregory Haskins57d885f2008-01-25 21:08:18 +01006625 atomic_set(&def_root_domain.refcount, 1);
6626}
6627
Gregory Haskinsdc938522008-01-25 21:08:26 +01006628static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006629{
6630 struct root_domain *rd;
6631
6632 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6633 if (!rd)
6634 return NULL;
6635
Pekka Enberg68c38fc2010-07-15 23:18:22 +03006636 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10306637 kfree(rd);
6638 return NULL;
6639 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01006640
6641 return rd;
6642}
6643
Linus Torvalds1da177e2005-04-16 15:20:36 -07006644/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01006645 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07006646 * hold the hotplug lock.
6647 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01006648static void
6649cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006650{
Ingo Molnar70b97a72006-07-03 00:25:42 -07006651 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07006652 struct sched_domain *tmp;
6653
Peter Zijlstra669c55e2010-04-16 14:59:29 +02006654 for (tmp = sd; tmp; tmp = tmp->parent)
6655 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6656
Suresh Siddha245af2c2005-06-25 14:57:25 -07006657 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08006658 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006659 struct sched_domain *parent = tmp->parent;
6660 if (!parent)
6661 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08006662
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006663 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006664 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006665 if (parent->parent)
6666 parent->parent->child = tmp;
Li Zefanf29c9b12008-11-06 09:45:16 +08006667 } else
6668 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07006669 }
6670
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006671 if (sd && sd_degenerate(sd)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07006672 sd = sd->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07006673 if (sd)
6674 sd->child = NULL;
6675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006676
6677 sched_domain_debug(sd, cpu);
6678
Gregory Haskins57d885f2008-01-25 21:08:18 +01006679 rq_attach_root(rq, rd);
Nick Piggin674311d2005-06-25 14:57:27 -07006680 rcu_assign_pointer(rq->sd, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006681}
6682
6683/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306684static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006685
6686/* Setup the mask of cpus configured for isolated domains */
6687static int __init isolated_cpu_setup(char *str)
6688{
Rusty Russellbdddd292009-12-02 14:09:16 +10306689 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10306690 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006691 return 1;
6692}
6693
Ingo Molnar8927f492007-10-15 17:00:13 +02006694__setup("isolcpus=", isolated_cpu_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006695
6696/*
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006697 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6698 * to a function which identifies what group(along with sched group) a CPU
Rusty Russell96f874e2008-11-25 02:35:14 +10306699 * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
6700 * (due to the fact that we keep track of groups covered with a struct cpumask).
Linus Torvalds1da177e2005-04-16 15:20:36 -07006701 *
6702 * init_sched_build_groups will build a circular linked list of the groups
6703 * covered by the given span, and will set each group's ->cpumask correctly,
6704 * and ->cpu_power to 0.
6705 */
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07006706static void
Rusty Russell96f874e2008-11-25 02:35:14 +10306707init_sched_build_groups(const struct cpumask *span,
6708 const struct cpumask *cpu_map,
6709 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
Mike Travis7c16ec52008-04-04 18:11:11 -07006710 struct sched_group **sg,
Rusty Russell96f874e2008-11-25 02:35:14 +10306711 struct cpumask *tmpmask),
6712 struct cpumask *covered, struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006713{
6714 struct sched_group *first = NULL, *last = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715 int i;
6716
Rusty Russell96f874e2008-11-25 02:35:14 +10306717 cpumask_clear(covered);
Mike Travis7c16ec52008-04-04 18:11:11 -07006718
Rusty Russellabcd0832008-11-25 02:35:02 +10306719 for_each_cpu(i, span) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006720 struct sched_group *sg;
Mike Travis7c16ec52008-04-04 18:11:11 -07006721 int group = group_fn(i, cpu_map, &sg, tmpmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006722 int j;
6723
Rusty Russell758b2cd2008-11-25 02:35:04 +10306724 if (cpumask_test_cpu(i, covered))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725 continue;
6726
Rusty Russell758b2cd2008-11-25 02:35:04 +10306727 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra18a38852009-09-01 10:34:39 +02006728 sg->cpu_power = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006729
Rusty Russellabcd0832008-11-25 02:35:02 +10306730 for_each_cpu(j, span) {
Mike Travis7c16ec52008-04-04 18:11:11 -07006731 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006732 continue;
6733
Rusty Russell96f874e2008-11-25 02:35:14 +10306734 cpumask_set_cpu(j, covered);
Rusty Russell758b2cd2008-11-25 02:35:04 +10306735 cpumask_set_cpu(j, sched_group_cpus(sg));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006736 }
6737 if (!first)
6738 first = sg;
6739 if (last)
6740 last->next = sg;
6741 last = sg;
6742 }
6743 last->next = first;
6744}
6745
John Hawkes9c1cfda2005-09-06 15:18:14 -07006746#define SD_NODES_PER_DOMAIN 16
Linus Torvalds1da177e2005-04-16 15:20:36 -07006747
John Hawkes9c1cfda2005-09-06 15:18:14 -07006748#ifdef CONFIG_NUMA
akpm@osdl.org198e2f12006-01-12 01:05:30 -08006749
John Hawkes9c1cfda2005-09-06 15:18:14 -07006750/**
6751 * find_next_best_node - find the next node to include in a sched_domain
6752 * @node: node whose sched_domain we're building
6753 * @used_nodes: nodes already in the sched_domain
6754 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006755 * Find the next node to include in a given scheduling domain. Simply
John Hawkes9c1cfda2005-09-06 15:18:14 -07006756 * finds the closest node not already in the @used_nodes map.
6757 *
6758 * Should use nodemask_t.
6759 */
Mike Travisc5f59f02008-04-04 18:11:10 -07006760static int find_next_best_node(int node, nodemask_t *used_nodes)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006761{
6762 int i, n, val, min_val, best_node = 0;
6763
6764 min_val = INT_MAX;
6765
Mike Travis076ac2a2008-05-12 21:21:12 +02006766 for (i = 0; i < nr_node_ids; i++) {
John Hawkes9c1cfda2005-09-06 15:18:14 -07006767 /* Start at @node */
Mike Travis076ac2a2008-05-12 21:21:12 +02006768 n = (node + i) % nr_node_ids;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006769
6770 if (!nr_cpus_node(n))
6771 continue;
6772
6773 /* Skip already used nodes */
Mike Travisc5f59f02008-04-04 18:11:10 -07006774 if (node_isset(n, *used_nodes))
John Hawkes9c1cfda2005-09-06 15:18:14 -07006775 continue;
6776
6777 /* Simple min distance search */
6778 val = node_distance(node, n);
6779
6780 if (val < min_val) {
6781 min_val = val;
6782 best_node = n;
6783 }
6784 }
6785
Mike Travisc5f59f02008-04-04 18:11:10 -07006786 node_set(best_node, *used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006787 return best_node;
6788}
6789
6790/**
6791 * sched_domain_node_span - get a cpumask for a node's sched_domain
6792 * @node: node whose cpumask we're constructing
Randy Dunlap73486722008-04-22 10:07:22 -07006793 * @span: resulting cpumask
John Hawkes9c1cfda2005-09-06 15:18:14 -07006794 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006795 * Given a node, construct a good cpumask for its sched_domain to span. It
John Hawkes9c1cfda2005-09-06 15:18:14 -07006796 * should be one that prevents unnecessary balancing, but also spreads tasks
6797 * out optimally.
6798 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306799static void sched_domain_node_span(int node, struct cpumask *span)
John Hawkes9c1cfda2005-09-06 15:18:14 -07006800{
Mike Travisc5f59f02008-04-04 18:11:10 -07006801 nodemask_t used_nodes;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006802 int i;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006803
Mike Travis6ca09df2008-12-31 18:08:45 -08006804 cpumask_clear(span);
Mike Travisc5f59f02008-04-04 18:11:10 -07006805 nodes_clear(used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006806
Mike Travis6ca09df2008-12-31 18:08:45 -08006807 cpumask_or(span, span, cpumask_of_node(node));
Mike Travisc5f59f02008-04-04 18:11:10 -07006808 node_set(node, used_nodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006809
6810 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
Mike Travisc5f59f02008-04-04 18:11:10 -07006811 int next_node = find_next_best_node(node, &used_nodes);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006812
Mike Travis6ca09df2008-12-31 18:08:45 -08006813 cpumask_or(span, span, cpumask_of_node(next_node));
John Hawkes9c1cfda2005-09-06 15:18:14 -07006814 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006815}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006816#endif /* CONFIG_NUMA */
John Hawkes9c1cfda2005-09-06 15:18:14 -07006817
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07006818int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07006819
John Hawkes9c1cfda2005-09-06 15:18:14 -07006820/*
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306821 * The cpus mask in sched_group and sched_domain hangs off the end.
Ingo Molnar4200efd2009-05-19 09:22:19 +02006822 *
6823 * ( See the the comments in include/linux/sched.h:struct sched_group
6824 * and struct sched_domain. )
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306825 */
6826struct static_sched_group {
6827 struct sched_group sg;
6828 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6829};
6830
6831struct static_sched_domain {
6832 struct sched_domain sd;
6833 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6834};
6835
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006836struct s_data {
6837#ifdef CONFIG_NUMA
6838 int sd_allnodes;
6839 cpumask_var_t domainspan;
6840 cpumask_var_t covered;
6841 cpumask_var_t notcovered;
6842#endif
6843 cpumask_var_t nodemask;
6844 cpumask_var_t this_sibling_map;
6845 cpumask_var_t this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02006846 cpumask_var_t this_book_map;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006847 cpumask_var_t send_covered;
6848 cpumask_var_t tmpmask;
6849 struct sched_group **sched_group_nodes;
6850 struct root_domain *rd;
6851};
6852
Andreas Herrmann2109b992009-08-18 12:53:00 +02006853enum s_alloc {
6854 sa_sched_groups = 0,
6855 sa_rootdomain,
6856 sa_tmpmask,
6857 sa_send_covered,
Heiko Carstens01a08542010-08-31 10:28:16 +02006858 sa_this_book_map,
Andreas Herrmann2109b992009-08-18 12:53:00 +02006859 sa_this_core_map,
6860 sa_this_sibling_map,
6861 sa_nodemask,
6862 sa_sched_group_nodes,
6863#ifdef CONFIG_NUMA
6864 sa_notcovered,
6865 sa_covered,
6866 sa_domainspan,
6867#endif
6868 sa_none,
6869};
6870
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306871/*
Ingo Molnar48f24c42006-07-03 00:25:40 -07006872 * SMT sched-domains:
John Hawkes9c1cfda2005-09-06 15:18:14 -07006873 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006874#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306875static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
Tejun Heo1871e522009-10-29 22:34:13 +09006876static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006877
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006878static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306879cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6880 struct sched_group **sg, struct cpumask *unused)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006881{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006882 if (sg)
Tejun Heo1871e522009-10-29 22:34:13 +09006883 *sg = &per_cpu(sched_groups, cpu).sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006884 return cpu;
6885}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006886#endif /* CONFIG_SCHED_SMT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006887
Ingo Molnar48f24c42006-07-03 00:25:40 -07006888/*
6889 * multi-core sched-domains:
6890 */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006891#ifdef CONFIG_SCHED_MC
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306892static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6893static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006894
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006895static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306896cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6897 struct sched_group **sg, struct cpumask *mask)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006898{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006899 int group;
Heiko Carstensf2698932010-08-31 10:28:15 +02006900#ifdef CONFIG_SCHED_SMT
Rusty Russellc69fc562009-03-13 14:49:46 +10306901 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306902 group = cpumask_first(mask);
Heiko Carstensf2698932010-08-31 10:28:15 +02006903#else
6904 group = cpu;
6905#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006906 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306907 *sg = &per_cpu(sched_group_core, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006908 return group;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006909}
Heiko Carstensf2698932010-08-31 10:28:15 +02006910#endif /* CONFIG_SCHED_MC */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006911
Heiko Carstens01a08542010-08-31 10:28:16 +02006912/*
6913 * book sched-domains:
6914 */
6915#ifdef CONFIG_SCHED_BOOK
6916static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
6917static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
6918
Linus Torvalds1da177e2005-04-16 15:20:36 -07006919static int
Heiko Carstens01a08542010-08-31 10:28:16 +02006920cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
6921 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006922{
Heiko Carstens01a08542010-08-31 10:28:16 +02006923 int group = cpu;
6924#ifdef CONFIG_SCHED_MC
6925 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
6926 group = cpumask_first(mask);
6927#elif defined(CONFIG_SCHED_SMT)
6928 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
6929 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02006931 if (sg)
6932 *sg = &per_cpu(sched_group_book, group).sg;
6933 return group;
6934}
6935#endif /* CONFIG_SCHED_BOOK */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006936
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306937static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6938static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
Ingo Molnar48f24c42006-07-03 00:25:40 -07006939
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006940static int
Rusty Russell96f874e2008-11-25 02:35:14 +10306941cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6942 struct sched_group **sg, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006943{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006944 int group;
Heiko Carstens01a08542010-08-31 10:28:16 +02006945#ifdef CONFIG_SCHED_BOOK
6946 cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
6947 group = cpumask_first(mask);
6948#elif defined(CONFIG_SCHED_MC)
Mike Travis6ca09df2008-12-31 18:08:45 -08006949 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306950 group = cpumask_first(mask);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08006951#elif defined(CONFIG_SCHED_SMT)
Rusty Russellc69fc562009-03-13 14:49:46 +10306952 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306953 group = cpumask_first(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006954#else
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006955 group = cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006956#endif
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006957 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306958 *sg = &per_cpu(sched_group_phys, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006959 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006960}
6961
6962#ifdef CONFIG_NUMA
John Hawkes9c1cfda2005-09-06 15:18:14 -07006963/*
6964 * The init_sched_build_groups can't handle what we want to do with node
6965 * groups, so roll our own. Now each node has its own list of groups which
6966 * gets dynamically allocated.
6967 */
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006968static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
Mike Travis434d53b2008-04-04 18:11:04 -07006969static struct sched_group ***sched_group_nodes_bycpu;
John Hawkes9c1cfda2005-09-06 15:18:14 -07006970
Rusty Russell62ea9ce2009-01-11 01:04:16 +01006971static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306972static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
John Hawkes9c1cfda2005-09-06 15:18:14 -07006973
Rusty Russell96f874e2008-11-25 02:35:14 +10306974static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
6975 struct sched_group **sg,
6976 struct cpumask *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977{
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006978 int group;
6979
Mike Travis6ca09df2008-12-31 18:08:45 -08006980 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10306981 group = cpumask_first(nodemask);
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006982
6983 if (sg)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306984 *sg = &per_cpu(sched_group_allnodes, group).sg;
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006985 return group;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986}
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08006987
Siddha, Suresh B08069032006-03-27 01:15:23 -08006988static void init_numa_sched_groups_power(struct sched_group *group_head)
6989{
6990 struct sched_group *sg = group_head;
6991 int j;
6992
6993 if (!sg)
6994 return;
Andi Kleen3a5c3592007-10-15 17:00:14 +02006995 do {
Rusty Russell758b2cd2008-11-25 02:35:04 +10306996 for_each_cpu(j, sched_group_cpus(sg)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02006997 struct sched_domain *sd;
Siddha, Suresh B08069032006-03-27 01:15:23 -08006998
Rusty Russell6c99e9a2008-11-25 02:35:04 +10306999 sd = &per_cpu(phys_domains, j).sd;
Miao Xie13318a72009-04-15 09:59:10 +08007000 if (j != group_first_cpu(sd->groups)) {
Andi Kleen3a5c3592007-10-15 17:00:14 +02007001 /*
7002 * Only add "power" once for each
7003 * physical package.
7004 */
7005 continue;
7006 }
7007
Peter Zijlstra18a38852009-09-01 10:34:39 +02007008 sg->cpu_power += sd->groups->cpu_power;
Siddha, Suresh B08069032006-03-27 01:15:23 -08007009 }
Andi Kleen3a5c3592007-10-15 17:00:14 +02007010 sg = sg->next;
7011 } while (sg != group_head);
Siddha, Suresh B08069032006-03-27 01:15:23 -08007012}
Andreas Herrmann0601a882009-08-18 13:01:11 +02007013
7014static int build_numa_sched_groups(struct s_data *d,
7015 const struct cpumask *cpu_map, int num)
7016{
7017 struct sched_domain *sd;
7018 struct sched_group *sg, *prev;
7019 int n, j;
7020
7021 cpumask_clear(d->covered);
7022 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
7023 if (cpumask_empty(d->nodemask)) {
7024 d->sched_group_nodes[num] = NULL;
7025 goto out;
7026 }
7027
7028 sched_domain_node_span(num, d->domainspan);
7029 cpumask_and(d->domainspan, d->domainspan, cpu_map);
7030
7031 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7032 GFP_KERNEL, num);
7033 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007034 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
7035 num);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007036 return -ENOMEM;
7037 }
7038 d->sched_group_nodes[num] = sg;
7039
7040 for_each_cpu(j, d->nodemask) {
7041 sd = &per_cpu(node_domains, j).sd;
7042 sd->groups = sg;
7043 }
7044
Peter Zijlstra18a38852009-09-01 10:34:39 +02007045 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007046 cpumask_copy(sched_group_cpus(sg), d->nodemask);
7047 sg->next = sg;
7048 cpumask_or(d->covered, d->covered, d->nodemask);
7049
7050 prev = sg;
7051 for (j = 0; j < nr_node_ids; j++) {
7052 n = (num + j) % nr_node_ids;
7053 cpumask_complement(d->notcovered, d->covered);
7054 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
7055 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
7056 if (cpumask_empty(d->tmpmask))
7057 break;
7058 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
7059 if (cpumask_empty(d->tmpmask))
7060 continue;
7061 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7062 GFP_KERNEL, num);
7063 if (!sg) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007064 printk(KERN_WARNING
7065 "Can not alloc domain group for node %d\n", j);
Andreas Herrmann0601a882009-08-18 13:01:11 +02007066 return -ENOMEM;
7067 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007068 sg->cpu_power = 0;
Andreas Herrmann0601a882009-08-18 13:01:11 +02007069 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
7070 sg->next = prev->next;
7071 cpumask_or(d->covered, d->covered, d->tmpmask);
7072 prev->next = sg;
7073 prev = sg;
7074 }
7075out:
7076 return 0;
7077}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007078#endif /* CONFIG_NUMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007079
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007080#ifdef CONFIG_NUMA
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007081/* Free memory allocated for various sched_group structures */
Rusty Russell96f874e2008-11-25 02:35:14 +10307082static void free_sched_groups(const struct cpumask *cpu_map,
7083 struct cpumask *nodemask)
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007084{
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007085 int cpu, i;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007086
Rusty Russellabcd0832008-11-25 02:35:02 +10307087 for_each_cpu(cpu, cpu_map) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007088 struct sched_group **sched_group_nodes
7089 = sched_group_nodes_bycpu[cpu];
7090
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007091 if (!sched_group_nodes)
7092 continue;
7093
Mike Travis076ac2a2008-05-12 21:21:12 +02007094 for (i = 0; i < nr_node_ids; i++) {
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007095 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7096
Mike Travis6ca09df2008-12-31 18:08:45 -08007097 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
Rusty Russell96f874e2008-11-25 02:35:14 +10307098 if (cpumask_empty(nodemask))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007099 continue;
7100
7101 if (sg == NULL)
7102 continue;
7103 sg = sg->next;
7104next_sg:
7105 oldsg = sg;
7106 sg = sg->next;
7107 kfree(oldsg);
7108 if (oldsg != sched_group_nodes[i])
7109 goto next_sg;
7110 }
7111 kfree(sched_group_nodes);
7112 sched_group_nodes_bycpu[cpu] = NULL;
7113 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007114}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007115#else /* !CONFIG_NUMA */
Rusty Russell96f874e2008-11-25 02:35:14 +10307116static void free_sched_groups(const struct cpumask *cpu_map,
7117 struct cpumask *nodemask)
Siddha, Suresh Ba6160582006-10-03 01:14:06 -07007118{
7119}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007120#endif /* CONFIG_NUMA */
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007121
Linus Torvalds1da177e2005-04-16 15:20:36 -07007122/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007123 * Initialize sched groups cpu_power.
7124 *
7125 * cpu_power indicates the capacity of sched group, which is used while
7126 * distributing the load between different sched groups in a sched domain.
7127 * Typically cpu_power for all the groups in a sched domain will be same unless
7128 * there are asymmetries in the topology. If there are asymmetries, group
7129 * having more cpu_power will pickup more load compared to the group having
7130 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007131 */
7132static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7133{
7134 struct sched_domain *child;
7135 struct sched_group *group;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007136 long power;
7137 int weight;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007138
7139 WARN_ON(!sd || !sd->groups);
7140
Miao Xie13318a72009-04-15 09:59:10 +08007141 if (cpu != group_first_cpu(sd->groups))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007142 return;
7143
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07007144 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
7145
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007146 child = sd->child;
7147
Peter Zijlstra18a38852009-09-01 10:34:39 +02007148 sd->groups->cpu_power = 0;
Eric Dumazet5517d862007-05-08 00:32:57 -07007149
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007150 if (!child) {
7151 power = SCHED_LOAD_SCALE;
7152 weight = cpumask_weight(sched_domain_span(sd));
7153 /*
7154 * SMT siblings share the power of a single core.
Peter Zijlstraa52bfd732009-09-01 10:34:35 +02007155 * Usually multiple threads get a better yield out of
7156 * that one core than a single thread would have,
7157 * reflect that in sd->smt_gain.
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007158 */
Peter Zijlstraa52bfd732009-09-01 10:34:35 +02007159 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
7160 power *= sd->smt_gain;
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007161 power /= weight;
Peter Zijlstraa52bfd732009-09-01 10:34:35 +02007162 power >>= SCHED_LOAD_SHIFT;
7163 }
Peter Zijlstra18a38852009-09-01 10:34:39 +02007164 sd->groups->cpu_power += power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007165 return;
7166 }
7167
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007168 /*
Peter Zijlstraf93e65c2009-09-01 10:34:32 +02007169 * Add cpu_power of each child group to this groups cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007170 */
7171 group = child->groups;
7172 do {
Peter Zijlstra18a38852009-09-01 10:34:39 +02007173 sd->groups->cpu_power += group->cpu_power;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007174 group = group->next;
7175 } while (group != child->groups);
7176}
7177
7178/*
Mike Travis7c16ec52008-04-04 18:11:11 -07007179 * Initializers for schedule domains
7180 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7181 */
7182
Ingo Molnara5d8c342008-10-09 11:35:51 +02007183#ifdef CONFIG_SCHED_DEBUG
7184# define SD_INIT_NAME(sd, type) sd->name = #type
7185#else
7186# define SD_INIT_NAME(sd, type) do { } while (0)
7187#endif
7188
Mike Travis7c16ec52008-04-04 18:11:11 -07007189#define SD_INIT(sd, type) sd_init_##type(sd)
Ingo Molnara5d8c342008-10-09 11:35:51 +02007190
Mike Travis7c16ec52008-04-04 18:11:11 -07007191#define SD_INIT_FUNC(type) \
7192static noinline void sd_init_##type(struct sched_domain *sd) \
7193{ \
7194 memset(sd, 0, sizeof(*sd)); \
7195 *sd = SD_##type##_INIT; \
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007196 sd->level = SD_LV_##type; \
Ingo Molnara5d8c342008-10-09 11:35:51 +02007197 SD_INIT_NAME(sd, type); \
Mike Travis7c16ec52008-04-04 18:11:11 -07007198}
7199
7200SD_INIT_FUNC(CPU)
7201#ifdef CONFIG_NUMA
7202 SD_INIT_FUNC(ALLNODES)
7203 SD_INIT_FUNC(NODE)
7204#endif
7205#ifdef CONFIG_SCHED_SMT
7206 SD_INIT_FUNC(SIBLING)
7207#endif
7208#ifdef CONFIG_SCHED_MC
7209 SD_INIT_FUNC(MC)
7210#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007211#ifdef CONFIG_SCHED_BOOK
7212 SD_INIT_FUNC(BOOK)
7213#endif
Mike Travis7c16ec52008-04-04 18:11:11 -07007214
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007215static int default_relax_domain_level = -1;
7216
7217static int __init setup_relax_domain_level(char *str)
7218{
Li Zefan30e0e172008-05-13 10:27:17 +08007219 unsigned long val;
7220
7221 val = simple_strtoul(str, NULL, 0);
7222 if (val < SD_LV_MAX)
7223 default_relax_domain_level = val;
7224
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007225 return 1;
7226}
7227__setup("relax_domain_level=", setup_relax_domain_level);
7228
7229static void set_domain_attribute(struct sched_domain *sd,
7230 struct sched_domain_attr *attr)
7231{
7232 int request;
7233
7234 if (!attr || attr->relax_domain_level < 0) {
7235 if (default_relax_domain_level < 0)
7236 return;
7237 else
7238 request = default_relax_domain_level;
7239 } else
7240 request = attr->relax_domain_level;
7241 if (request < sd->level) {
7242 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007243 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007244 } else {
7245 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02007246 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007247 }
7248}
7249
Andreas Herrmann2109b992009-08-18 12:53:00 +02007250static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7251 const struct cpumask *cpu_map)
7252{
7253 switch (what) {
7254 case sa_sched_groups:
7255 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
7256 d->sched_group_nodes = NULL;
7257 case sa_rootdomain:
7258 free_rootdomain(d->rd); /* fall through */
7259 case sa_tmpmask:
7260 free_cpumask_var(d->tmpmask); /* fall through */
7261 case sa_send_covered:
7262 free_cpumask_var(d->send_covered); /* fall through */
Heiko Carstens01a08542010-08-31 10:28:16 +02007263 case sa_this_book_map:
7264 free_cpumask_var(d->this_book_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02007265 case sa_this_core_map:
7266 free_cpumask_var(d->this_core_map); /* fall through */
7267 case sa_this_sibling_map:
7268 free_cpumask_var(d->this_sibling_map); /* fall through */
7269 case sa_nodemask:
7270 free_cpumask_var(d->nodemask); /* fall through */
7271 case sa_sched_group_nodes:
7272#ifdef CONFIG_NUMA
7273 kfree(d->sched_group_nodes); /* fall through */
7274 case sa_notcovered:
7275 free_cpumask_var(d->notcovered); /* fall through */
7276 case sa_covered:
7277 free_cpumask_var(d->covered); /* fall through */
7278 case sa_domainspan:
7279 free_cpumask_var(d->domainspan); /* fall through */
7280#endif
7281 case sa_none:
7282 break;
7283 }
7284}
7285
7286static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7287 const struct cpumask *cpu_map)
7288{
7289#ifdef CONFIG_NUMA
7290 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
7291 return sa_none;
7292 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
7293 return sa_domainspan;
7294 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
7295 return sa_covered;
7296 /* Allocate the per-node list of sched groups */
7297 d->sched_group_nodes = kcalloc(nr_node_ids,
7298 sizeof(struct sched_group *), GFP_KERNEL);
7299 if (!d->sched_group_nodes) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007300 printk(KERN_WARNING "Can not alloc sched group node list\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007301 return sa_notcovered;
7302 }
7303 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
7304#endif
7305 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
7306 return sa_sched_group_nodes;
7307 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
7308 return sa_nodemask;
7309 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
7310 return sa_this_sibling_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007311 if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
Andreas Herrmann2109b992009-08-18 12:53:00 +02007312 return sa_this_core_map;
Heiko Carstens01a08542010-08-31 10:28:16 +02007313 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
7314 return sa_this_book_map;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007315 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
7316 return sa_send_covered;
7317 d->rd = alloc_rootdomain();
7318 if (!d->rd) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01007319 printk(KERN_WARNING "Cannot alloc root domain\n");
Andreas Herrmann2109b992009-08-18 12:53:00 +02007320 return sa_tmpmask;
7321 }
7322 return sa_rootdomain;
7323}
7324
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007325static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
7326 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
7327{
7328 struct sched_domain *sd = NULL;
7329#ifdef CONFIG_NUMA
7330 struct sched_domain *parent;
7331
7332 d->sd_allnodes = 0;
7333 if (cpumask_weight(cpu_map) >
7334 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
7335 sd = &per_cpu(allnodes_domains, i).sd;
7336 SD_INIT(sd, ALLNODES);
7337 set_domain_attribute(sd, attr);
7338 cpumask_copy(sched_domain_span(sd), cpu_map);
7339 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
7340 d->sd_allnodes = 1;
7341 }
7342 parent = sd;
7343
7344 sd = &per_cpu(node_domains, i).sd;
7345 SD_INIT(sd, NODE);
7346 set_domain_attribute(sd, attr);
7347 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7348 sd->parent = parent;
7349 if (parent)
7350 parent->child = sd;
7351 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
7352#endif
7353 return sd;
7354}
7355
Andreas Herrmann87cce662009-08-18 12:54:55 +02007356static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
7357 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7358 struct sched_domain *parent, int i)
7359{
7360 struct sched_domain *sd;
7361 sd = &per_cpu(phys_domains, i).sd;
7362 SD_INIT(sd, CPU);
7363 set_domain_attribute(sd, attr);
7364 cpumask_copy(sched_domain_span(sd), d->nodemask);
7365 sd->parent = parent;
7366 if (parent)
7367 parent->child = sd;
7368 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
7369 return sd;
7370}
7371
Heiko Carstens01a08542010-08-31 10:28:16 +02007372static struct sched_domain *__build_book_sched_domain(struct s_data *d,
7373 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7374 struct sched_domain *parent, int i)
7375{
7376 struct sched_domain *sd = parent;
7377#ifdef CONFIG_SCHED_BOOK
7378 sd = &per_cpu(book_domains, i).sd;
7379 SD_INIT(sd, BOOK);
7380 set_domain_attribute(sd, attr);
7381 cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
7382 sd->parent = parent;
7383 parent->child = sd;
7384 cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
7385#endif
7386 return sd;
7387}
7388
Andreas Herrmann410c4082009-08-18 12:56:14 +02007389static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7390 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7391 struct sched_domain *parent, int i)
7392{
7393 struct sched_domain *sd = parent;
7394#ifdef CONFIG_SCHED_MC
7395 sd = &per_cpu(core_domains, i).sd;
7396 SD_INIT(sd, MC);
7397 set_domain_attribute(sd, attr);
7398 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7399 sd->parent = parent;
7400 parent->child = sd;
7401 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
7402#endif
7403 return sd;
7404}
7405
Andreas Herrmannd8173532009-08-18 12:57:03 +02007406static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7407 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7408 struct sched_domain *parent, int i)
7409{
7410 struct sched_domain *sd = parent;
7411#ifdef CONFIG_SCHED_SMT
7412 sd = &per_cpu(cpu_domains, i).sd;
7413 SD_INIT(sd, SIBLING);
7414 set_domain_attribute(sd, attr);
7415 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7416 sd->parent = parent;
7417 parent->child = sd;
7418 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
7419#endif
7420 return sd;
7421}
7422
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007423static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7424 const struct cpumask *cpu_map, int cpu)
7425{
7426 switch (l) {
7427#ifdef CONFIG_SCHED_SMT
7428 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7429 cpumask_and(d->this_sibling_map, cpu_map,
7430 topology_thread_cpumask(cpu));
7431 if (cpu == cpumask_first(d->this_sibling_map))
7432 init_sched_build_groups(d->this_sibling_map, cpu_map,
7433 &cpu_to_cpu_group,
7434 d->send_covered, d->tmpmask);
7435 break;
7436#endif
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007437#ifdef CONFIG_SCHED_MC
7438 case SD_LV_MC: /* set up multi-core groups */
7439 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7440 if (cpu == cpumask_first(d->this_core_map))
7441 init_sched_build_groups(d->this_core_map, cpu_map,
7442 &cpu_to_core_group,
7443 d->send_covered, d->tmpmask);
7444 break;
7445#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007446#ifdef CONFIG_SCHED_BOOK
7447 case SD_LV_BOOK: /* set up book groups */
7448 cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
7449 if (cpu == cpumask_first(d->this_book_map))
7450 init_sched_build_groups(d->this_book_map, cpu_map,
7451 &cpu_to_book_group,
7452 d->send_covered, d->tmpmask);
7453 break;
7454#endif
Andreas Herrmann86548092009-08-18 12:59:28 +02007455 case SD_LV_CPU: /* set up physical groups */
7456 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7457 if (!cpumask_empty(d->nodemask))
7458 init_sched_build_groups(d->nodemask, cpu_map,
7459 &cpu_to_phys_group,
7460 d->send_covered, d->tmpmask);
7461 break;
Andreas Herrmannde616e32009-08-18 13:00:13 +02007462#ifdef CONFIG_NUMA
7463 case SD_LV_ALLNODES:
7464 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7465 d->send_covered, d->tmpmask);
7466 break;
7467#endif
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007468 default:
7469 break;
7470 }
7471}
7472
Mike Travis7c16ec52008-04-04 18:11:11 -07007473/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007474 * Build sched domains for a given set of cpus and attach the sched domains
7475 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07007476 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307477static int __build_sched_domains(const struct cpumask *cpu_map,
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007478 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007479{
Andreas Herrmann2109b992009-08-18 12:53:00 +02007480 enum s_alloc alloc_state = sa_none;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007481 struct s_data d;
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007482 struct sched_domain *sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02007483 int i;
John Hawkesd1b55132005-09-06 15:18:14 -07007484#ifdef CONFIG_NUMA
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007485 d.sd_allnodes = 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307486#endif
7487
Andreas Herrmann2109b992009-08-18 12:53:00 +02007488 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7489 if (alloc_state != sa_rootdomain)
7490 goto error;
7491 alloc_state = sa_sched_groups;
Mike Travis7c16ec52008-04-04 18:11:11 -07007492
Linus Torvalds1da177e2005-04-16 15:20:36 -07007493 /*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007494 * Set up domains for cpus specified by the cpu_map.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007495 */
Rusty Russellabcd0832008-11-25 02:35:02 +10307496 for_each_cpu(i, cpu_map) {
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007497 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7498 cpu_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007499
Andreas Herrmann7f4588f2009-08-18 12:54:06 +02007500 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
Andreas Herrmann87cce662009-08-18 12:54:55 +02007501 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007502 sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmann410c4082009-08-18 12:56:14 +02007503 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
Andreas Herrmannd8173532009-08-18 12:57:03 +02007504 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505 }
7506
Rusty Russellabcd0832008-11-25 02:35:02 +10307507 for_each_cpu(i, cpu_map) {
Andreas Herrmann0e8e85c2009-08-18 12:57:51 +02007508 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
Heiko Carstens01a08542010-08-31 10:28:16 +02007509 build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
Andreas Herrmanna2af04c2009-08-18 12:58:38 +02007510 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511 }
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007512
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513 /* Set up physical groups */
Andreas Herrmann86548092009-08-18 12:59:28 +02007514 for (i = 0; i < nr_node_ids; i++)
7515 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007516
7517#ifdef CONFIG_NUMA
7518 /* Set up node groups */
Andreas Herrmannde616e32009-08-18 13:00:13 +02007519 if (d.sd_allnodes)
7520 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007521
Andreas Herrmann0601a882009-08-18 13:01:11 +02007522 for (i = 0; i < nr_node_ids; i++)
7523 if (build_numa_sched_groups(&d, cpu_map, i))
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007524 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007525#endif
7526
7527 /* Calculate CPU power for physical packages and nodes */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007528#ifdef CONFIG_SCHED_SMT
Rusty Russellabcd0832008-11-25 02:35:02 +10307529 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007530 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007531 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007532 }
7533#endif
7534#ifdef CONFIG_SCHED_MC
Rusty Russellabcd0832008-11-25 02:35:02 +10307535 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007536 sd = &per_cpu(core_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007537 init_sched_groups_power(i, sd);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007538 }
7539#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02007540#ifdef CONFIG_SCHED_BOOK
7541 for_each_cpu(i, cpu_map) {
7542 sd = &per_cpu(book_domains, i).sd;
7543 init_sched_groups_power(i, sd);
7544 }
7545#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546
Rusty Russellabcd0832008-11-25 02:35:02 +10307547 for_each_cpu(i, cpu_map) {
Andreas Herrmann294b0c92009-08-18 13:02:29 +02007548 sd = &per_cpu(phys_domains, i).sd;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07007549 init_sched_groups_power(i, sd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007550 }
7551
John Hawkes9c1cfda2005-09-06 15:18:14 -07007552#ifdef CONFIG_NUMA
Mike Travis076ac2a2008-05-12 21:21:12 +02007553 for (i = 0; i < nr_node_ids; i++)
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007554 init_numa_sched_groups_power(d.sched_group_nodes[i]);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007555
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007556 if (d.sd_allnodes) {
Siddha, Suresh B6711cab2006-12-10 02:20:07 -08007557 struct sched_group *sg;
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007558
Rusty Russell96f874e2008-11-25 02:35:14 +10307559 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007560 d.tmpmask);
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07007561 init_numa_sched_groups_power(sg);
7562 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07007563#endif
7564
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565 /* Attach the domains */
Rusty Russellabcd0832008-11-25 02:35:02 +10307566 for_each_cpu(i, cpu_map) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007567#ifdef CONFIG_SCHED_SMT
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307568 sd = &per_cpu(cpu_domains, i).sd;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08007569#elif defined(CONFIG_SCHED_MC)
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307570 sd = &per_cpu(core_domains, i).sd;
Heiko Carstens01a08542010-08-31 10:28:16 +02007571#elif defined(CONFIG_SCHED_BOOK)
7572 sd = &per_cpu(book_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573#else
Rusty Russell6c99e9a2008-11-25 02:35:04 +10307574 sd = &per_cpu(phys_domains, i).sd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007575#endif
Andreas Herrmann49a02c52009-08-18 12:51:52 +02007576 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007577 }
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007578
Andreas Herrmann2109b992009-08-18 12:53:00 +02007579 d.sched_group_nodes = NULL; /* don't free this we still need it */
7580 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7581 return 0;
Rusty Russell3404c8d2008-11-25 02:35:03 +10307582
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07007583error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02007584 __free_domain_allocs(&d, alloc_state, cpu_map);
7585 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586}
Paul Jackson029190c2007-10-18 23:40:20 -07007587
Rusty Russell96f874e2008-11-25 02:35:14 +10307588static int build_sched_domains(const struct cpumask *cpu_map)
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007589{
7590 return __build_sched_domains(cpu_map, NULL);
7591}
7592
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307593static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07007594static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02007595static struct sched_domain_attr *dattr_cur;
7596 /* attribues of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07007597
7598/*
7599 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10307600 * cpumask) fails, then fallback to a single sched domain,
7601 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07007602 */
Rusty Russell42128232008-11-25 02:35:12 +10307603static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07007604
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007605/*
7606 * arch_update_cpu_topology lets virtualized architectures update the
7607 * cpu core maps. It is supposed to return 1 if the topology changed
7608 * or 0 if it stayed the same.
7609 */
7610int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01007611{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01007612 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01007613}
7614
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307615cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7616{
7617 int i;
7618 cpumask_var_t *doms;
7619
7620 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7621 if (!doms)
7622 return NULL;
7623 for (i = 0; i < ndoms; i++) {
7624 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7625 free_sched_domains(doms, i);
7626 return NULL;
7627 }
7628 }
7629 return doms;
7630}
7631
7632void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7633{
7634 unsigned int i;
7635 for (i = 0; i < ndoms; i++)
7636 free_cpumask_var(doms[i]);
7637 kfree(doms);
7638}
7639
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007640/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007641 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07007642 * For now this just excludes isolated cpus, but could be used to
7643 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007644 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307645static int arch_init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007646{
Milton Miller73785472007-10-24 18:23:48 +02007647 int err;
7648
Heiko Carstens22e52b02008-03-12 18:31:59 +01007649 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07007650 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307651 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07007652 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307653 doms_cur = &fallback_doms;
7654 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007655 dattr_cur = NULL;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307656 err = build_sched_domains(doms_cur[0]);
Milton Miller6382bc92007-10-15 17:00:19 +02007657 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02007658
7659 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007660}
7661
Rusty Russell96f874e2008-11-25 02:35:14 +10307662static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7663 struct cpumask *tmpmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007664{
Mike Travis7c16ec52008-04-04 18:11:11 -07007665 free_sched_groups(cpu_map, tmpmask);
John Hawkes9c1cfda2005-09-06 15:18:14 -07007666}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007667
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007668/*
7669 * Detach sched domains from a group of cpus specified in cpu_map
7670 * These cpus will now be attached to the NULL domain
7671 */
Rusty Russell96f874e2008-11-25 02:35:14 +10307672static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007673{
Rusty Russell96f874e2008-11-25 02:35:14 +10307674 /* Save because hotplug lock held. */
7675 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007676 int i;
7677
Rusty Russellabcd0832008-11-25 02:35:02 +10307678 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01007679 cpu_attach_domain(NULL, &def_root_domain, i);
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007680 synchronize_sched();
Rusty Russell96f874e2008-11-25 02:35:14 +10307681 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07007682}
7683
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007684/* handle null as "default" */
7685static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7686 struct sched_domain_attr *new, int idx_new)
7687{
7688 struct sched_domain_attr tmp;
7689
7690 /* fast path */
7691 if (!new && !cur)
7692 return 1;
7693
7694 tmp = SD_ATTR_INIT;
7695 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7696 new ? (new + idx_new) : &tmp,
7697 sizeof(struct sched_domain_attr));
7698}
7699
Paul Jackson029190c2007-10-18 23:40:20 -07007700/*
7701 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007702 * cpumasks in the array doms_new[] of cpumasks. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07007703 * doms_new[] to the current sched domain partitioning, doms_cur[].
7704 * It destroys each deleted domain and builds each new domain.
7705 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307706 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007707 * The masks don't intersect (don't overlap.) We should setup one
7708 * sched domain for each mask. CPUs not in any of the cpumasks will
7709 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07007710 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7711 * it as it is.
7712 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307713 * The passed in 'doms_new' should be allocated using
7714 * alloc_sched_domains. This routine takes ownership of it and will
7715 * free_sched_domains it when done with it. If the caller failed the
7716 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7717 * and partition_sched_domains() will fallback to the single partition
7718 * 'fallback_doms', it also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07007719 *
Rusty Russell96f874e2008-11-25 02:35:14 +10307720 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08007721 * ndoms_new == 0 is a special case for destroying existing domains,
7722 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007723 *
Paul Jackson029190c2007-10-18 23:40:20 -07007724 * Call with hotplug lock held
7725 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307726void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007727 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07007728{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007729 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007730 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07007731
Heiko Carstens712555e2008-04-28 11:33:07 +02007732 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007733
Milton Miller73785472007-10-24 18:23:48 +02007734 /* always unregister in case we don't destroy any domains */
7735 unregister_sched_domain_sysctl();
7736
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007737 /* Let architecture update cpu core mappings. */
7738 new_topology = arch_update_cpu_topology();
7739
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007740 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07007741
7742 /* Destroy deleted domains */
7743 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007744 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307745 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007746 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007747 goto match1;
7748 }
7749 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307750 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07007751match1:
7752 ;
7753 }
7754
Max Krasnyanskye761b772008-07-15 04:43:49 -07007755 if (doms_new == NULL) {
7756 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307757 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007758 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08007759 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007760 }
7761
Paul Jackson029190c2007-10-18 23:40:20 -07007762 /* Build new domains */
7763 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01007764 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307765 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007766 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07007767 goto match2;
7768 }
7769 /* no match - add a new doms_new */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307770 __build_sched_domains(doms_new[i],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007771 dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07007772match2:
7773 ;
7774 }
7775
7776 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10307777 if (doms_cur != &fallback_doms)
7778 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007779 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07007780 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09007781 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07007782 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02007783
7784 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01007785
Heiko Carstens712555e2008-04-28 11:33:07 +02007786 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07007787}
7788
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007789#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
Li Zefanc70f22d2009-01-05 19:07:50 +08007790static void arch_reinit_sched_domains(void)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007791{
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007792 get_online_cpus();
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07007793
7794 /* Destroy domains first to force the rebuild */
7795 partition_sched_domains(0, NULL, NULL);
7796
Max Krasnyanskye761b772008-07-15 04:43:49 -07007797 rebuild_sched_domains();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007798 put_online_cpus();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007799}
7800
7801static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7802{
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307803 unsigned int level = 0;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007804
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307805 if (sscanf(buf, "%u", &level) != 1)
7806 return -EINVAL;
7807
7808 /*
7809 * level is always be positive so don't check for
7810 * level < POWERSAVINGS_BALANCE_NONE which is 0
7811 * What happens on 0 or 1 byte write,
7812 * need to check for count as well?
7813 */
7814
7815 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007816 return -EINVAL;
7817
7818 if (smt)
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307819 sched_smt_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007820 else
Gautham R Shenoyafb8a9b2008-12-18 23:26:09 +05307821 sched_mc_power_savings = level;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007822
Li Zefanc70f22d2009-01-05 19:07:50 +08007823 arch_reinit_sched_domains();
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007824
Li Zefanc70f22d2009-01-05 19:07:50 +08007825 return count;
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007826}
7827
Adrian Bunk6707de002007-08-12 18:08:19 +02007828#ifdef CONFIG_SCHED_MC
Andi Kleenf718cd42008-07-29 22:33:52 -07007829static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007830 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007831 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007832{
7833 return sprintf(page, "%u\n", sched_mc_power_savings);
7834}
Andi Kleenf718cd42008-07-29 22:33:52 -07007835static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007836 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007837 const char *buf, size_t count)
7838{
7839 return sched_power_savings_store(buf, count, 0);
7840}
Andi Kleenf718cd42008-07-29 22:33:52 -07007841static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7842 sched_mc_power_savings_show,
7843 sched_mc_power_savings_store);
Adrian Bunk6707de002007-08-12 18:08:19 +02007844#endif
7845
7846#ifdef CONFIG_SCHED_SMT
Andi Kleenf718cd42008-07-29 22:33:52 -07007847static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007848 struct sysdev_class_attribute *attr,
Andi Kleenf718cd42008-07-29 22:33:52 -07007849 char *page)
Adrian Bunk6707de002007-08-12 18:08:19 +02007850{
7851 return sprintf(page, "%u\n", sched_smt_power_savings);
7852}
Andi Kleenf718cd42008-07-29 22:33:52 -07007853static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
Andi Kleenc9be0a32010-01-05 12:47:58 +01007854 struct sysdev_class_attribute *attr,
Adrian Bunk6707de002007-08-12 18:08:19 +02007855 const char *buf, size_t count)
7856{
7857 return sched_power_savings_store(buf, count, 1);
7858}
Andi Kleenf718cd42008-07-29 22:33:52 -07007859static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7860 sched_smt_power_savings_show,
Adrian Bunk6707de002007-08-12 18:08:19 +02007861 sched_smt_power_savings_store);
7862#endif
7863
Li Zefan39aac642009-01-05 19:18:02 +08007864int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007865{
7866 int err = 0;
Ingo Molnar48f24c42006-07-03 00:25:40 -07007867
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007868#ifdef CONFIG_SCHED_SMT
7869 if (smt_capable())
7870 err = sysfs_create_file(&cls->kset.kobj,
7871 &attr_sched_smt_power_savings.attr);
7872#endif
7873#ifdef CONFIG_SCHED_MC
7874 if (!err && mc_capable())
7875 err = sysfs_create_file(&cls->kset.kobj,
7876 &attr_sched_mc_power_savings.attr);
7877#endif
7878 return err;
7879}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007880#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07007881
Linus Torvalds1da177e2005-04-16 15:20:36 -07007882/*
Tejun Heo3a101d02010-06-08 21:40:36 +02007883 * Update cpusets according to cpu_active mask. If cpusets are
7884 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7885 * around partition_sched_domains().
Linus Torvalds1da177e2005-04-16 15:20:36 -07007886 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02007887static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7888 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007889{
Tejun Heo3a101d02010-06-08 21:40:36 +02007890 switch (action & ~CPU_TASKS_FROZEN) {
Max Krasnyanskye761b772008-07-15 04:43:49 -07007891 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007892 case CPU_DOWN_FAILED:
Tejun Heo3a101d02010-06-08 21:40:36 +02007893 cpuset_update_active_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007894 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07007895 default:
7896 return NOTIFY_DONE;
7897 }
7898}
Tejun Heo3a101d02010-06-08 21:40:36 +02007899
Tejun Heo0b2e9182010-06-21 23:53:31 +02007900static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7901 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02007902{
7903 switch (action & ~CPU_TASKS_FROZEN) {
7904 case CPU_DOWN_PREPARE:
7905 cpuset_update_active_cpus();
7906 return NOTIFY_OK;
7907 default:
7908 return NOTIFY_DONE;
7909 }
7910}
Max Krasnyanskye761b772008-07-15 04:43:49 -07007911
7912static int update_runtime(struct notifier_block *nfb,
7913 unsigned long action, void *hcpu)
7914{
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007915 int cpu = (int)(long)hcpu;
7916
Linus Torvalds1da177e2005-04-16 15:20:36 -07007917 switch (action) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007918 case CPU_DOWN_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007919 case CPU_DOWN_PREPARE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007920 disable_runtime(cpu_rq(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007921 return NOTIFY_OK;
7922
Linus Torvalds1da177e2005-04-16 15:20:36 -07007923 case CPU_DOWN_FAILED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007924 case CPU_DOWN_FAILED_FROZEN:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007925 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07007926 case CPU_ONLINE_FROZEN:
Peter Zijlstra7def2be2008-06-05 14:49:58 +02007927 enable_runtime(cpu_rq(cpu));
Max Krasnyanskye761b772008-07-15 04:43:49 -07007928 return NOTIFY_OK;
7929
Linus Torvalds1da177e2005-04-16 15:20:36 -07007930 default:
7931 return NOTIFY_DONE;
7932 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007933}
Linus Torvalds1da177e2005-04-16 15:20:36 -07007934
7935void __init sched_init_smp(void)
7936{
Rusty Russelldcc30a32008-11-25 02:35:12 +10307937 cpumask_var_t non_isolated_cpus;
7938
7939 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08007940 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07007941
Mike Travis434d53b2008-04-04 18:11:04 -07007942#if defined(CONFIG_NUMA)
7943 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7944 GFP_KERNEL);
7945 BUG_ON(sched_group_nodes_bycpu == NULL);
7946#endif
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007947 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02007948 mutex_lock(&sched_domains_mutex);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01007949 arch_init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10307950 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7951 if (cpumask_empty(non_isolated_cpus))
7952 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02007953 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01007954 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07007955
Tejun Heo3a101d02010-06-08 21:40:36 +02007956 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7957 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07007958
7959 /* RT runtime code needs to handle some hotplug events */
7960 hotcpu_notifier(update_runtime, 0);
7961
Peter Zijlstrab328ca12008-04-29 10:02:46 +02007962 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07007963
7964 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10307965 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07007966 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01007967 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10307968 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10307969
Rusty Russell0e3900e2008-11-25 02:35:13 +10307970 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007971}
7972#else
7973void __init sched_init_smp(void)
7974{
Ingo Molnar19978ca2007-11-09 22:39:38 +01007975 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007976}
7977#endif /* CONFIG_SMP */
7978
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05307979const_debug unsigned int sysctl_timer_migration = 1;
7980
Linus Torvalds1da177e2005-04-16 15:20:36 -07007981int in_sched_functions(unsigned long addr)
7982{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007983 return in_lock_functions(addr) ||
7984 (addr >= (unsigned long)__sched_text_start
7985 && addr < (unsigned long)__sched_text_end);
7986}
7987
Alexey Dobriyana9957442007-10-15 17:00:13 +02007988static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
Ingo Molnardd41f592007-07-09 18:51:59 +02007989{
7990 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra4a55bd52008-04-19 19:45:00 +02007991 INIT_LIST_HEAD(&cfs_rq->tasks);
Ingo Molnardd41f592007-07-09 18:51:59 +02007992#ifdef CONFIG_FAIR_GROUP_SCHED
7993 cfs_rq->rq = rq;
Paul Turnerf07333b2011-01-21 20:45:03 -08007994 /* allow initial update_cfs_load() to truncate */
Peter Zijlstra6ea72f12011-01-26 13:36:03 +01007995#ifdef CONFIG_SMP
Paul Turnerf07333b2011-01-21 20:45:03 -08007996 cfs_rq->load_stamp = 1;
Ingo Molnardd41f592007-07-09 18:51:59 +02007997#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02007998#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02007999 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
Ingo Molnardd41f592007-07-09 18:51:59 +02008000}
8001
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008002static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8003{
8004 struct rt_prio_array *array;
8005 int i;
8006
8007 array = &rt_rq->active;
8008 for (i = 0; i < MAX_RT_PRIO; i++) {
8009 INIT_LIST_HEAD(array->queue + i);
8010 __clear_bit(i, array->bitmap);
8011 }
8012 /* delimiter for bitsearch: */
8013 __set_bit(MAX_RT_PRIO, array->bitmap);
8014
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008015#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
Gregory Haskinse864c492008-12-29 09:39:49 -05008016 rt_rq->highest_prio.curr = MAX_RT_PRIO;
Gregory Haskins398a1532009-01-14 09:10:04 -05008017#ifdef CONFIG_SMP
Gregory Haskinse864c492008-12-29 09:39:49 -05008018 rt_rq->highest_prio.next = MAX_RT_PRIO;
Peter Zijlstra48d5e252008-01-25 21:08:31 +01008019#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008020#endif
8021#ifdef CONFIG_SMP
8022 rt_rq->rt_nr_migratory = 0;
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008023 rt_rq->overloaded = 0;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008024 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008025#endif
8026
8027 rt_rq->rt_time = 0;
8028 rt_rq->rt_throttled = 0;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008029 rt_rq->rt_runtime = 0;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008030 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008031
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008032#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra23b0fdf2008-02-13 15:45:39 +01008033 rt_rq->rt_nr_boosted = 0;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008034 rt_rq->rq = rq;
8035#endif
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008036}
8037
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008038#ifdef CONFIG_FAIR_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008039static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008040 struct sched_entity *se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008041 struct sched_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008042{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008043 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008044 tg->cfs_rq[cpu] = cfs_rq;
8045 init_cfs_rq(cfs_rq, rq);
8046 cfs_rq->tg = tg;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008047
8048 tg->se[cpu] = se;
Yong Zhang07e06b02011-01-07 15:17:36 +08008049 /* se could be NULL for root_task_group */
Dhaval Giani354d60c2008-04-19 19:44:59 +02008050 if (!se)
8051 return;
8052
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008053 if (!parent)
8054 se->cfs_rq = &rq->cfs;
8055 else
8056 se->cfs_rq = parent->my_q;
8057
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008058 se->my_q = cfs_rq;
Paul Turner94371782010-11-15 15:47:10 -08008059 update_load_set(&se->load, 0);
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008060 se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008061}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008062#endif
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008063
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008064#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008065static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008066 struct sched_rt_entity *rt_se, int cpu,
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008067 struct sched_rt_entity *parent)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008068{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008069 struct rq *rq = cpu_rq(cpu);
8070
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008071 tg->rt_rq[cpu] = rt_rq;
8072 init_rt_rq(rt_rq, rq);
8073 rt_rq->tg = tg;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008074 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008075
8076 tg->rt_se[cpu] = rt_se;
Dhaval Giani354d60c2008-04-19 19:44:59 +02008077 if (!rt_se)
8078 return;
8079
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008080 if (!parent)
8081 rt_se->rt_rq = &rq->rt;
8082 else
8083 rt_se->rt_rq = parent->my_q;
8084
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008085 rt_se->my_q = rt_rq;
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008086 rt_se->parent = parent;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008087 INIT_LIST_HEAD(&rt_se->run_list);
8088}
8089#endif
8090
Linus Torvalds1da177e2005-04-16 15:20:36 -07008091void __init sched_init(void)
8092{
Ingo Molnardd41f592007-07-09 18:51:59 +02008093 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07008094 unsigned long alloc_size = 0, ptr;
8095
8096#ifdef CONFIG_FAIR_GROUP_SCHED
8097 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8098#endif
8099#ifdef CONFIG_RT_GROUP_SCHED
8100 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
8101#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308102#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10308103 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308104#endif
Mike Travis434d53b2008-04-04 18:11:04 -07008105 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03008106 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07008107
8108#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008109 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008110 ptr += nr_cpu_ids * sizeof(void **);
8111
Yong Zhang07e06b02011-01-07 15:17:36 +08008112 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008113 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008114
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008115#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07008116#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008117 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07008118 ptr += nr_cpu_ids * sizeof(void **);
8119
Yong Zhang07e06b02011-01-07 15:17:36 +08008120 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02008121 ptr += nr_cpu_ids * sizeof(void **);
8122
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008123#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10308124#ifdef CONFIG_CPUMASK_OFFSTACK
8125 for_each_possible_cpu(i) {
8126 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
8127 ptr += cpumask_size();
8128 }
8129#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07008130 }
Ingo Molnardd41f592007-07-09 18:51:59 +02008131
Gregory Haskins57d885f2008-01-25 21:08:18 +01008132#ifdef CONFIG_SMP
8133 init_defrootdomain();
8134#endif
8135
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008136 init_rt_bandwidth(&def_rt_bandwidth,
8137 global_rt_period(), global_rt_runtime());
8138
8139#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008140 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008141 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008142#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008143
Dhaval Giani7c941432010-01-20 13:26:18 +01008144#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008145 list_add(&root_task_group.list, &task_groups);
8146 INIT_LIST_HEAD(&root_task_group.children);
Mike Galbraith5091faa2010-11-30 14:18:03 +01008147 autogroup_init(&init_task);
Dhaval Giani7c941432010-01-20 13:26:18 +01008148#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008149
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08008150 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07008151 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008152
8153 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008154 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07008155 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008156 rq->calc_load_active = 0;
8157 rq->calc_load_update = jiffies + LOAD_FREQ;
Ingo Molnardd41f592007-07-09 18:51:59 +02008158 init_cfs_rq(&rq->cfs, rq);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01008159 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008160#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08008161 root_task_group.shares = root_task_group_load;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008162 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008163 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08008164 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02008165 *
8166 * In case of task-groups formed thr' the cgroup filesystem, it
8167 * gets 100% of the cpu resources in the system. This overall
8168 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08008169 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02008170 * based on each entity's (task or task-group's) weight
8171 * (se->load.weight).
8172 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008173 * In other words, if root_task_group has 10 tasks of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02008174 * 1024) and two child groups A0 and A1 (of weight 1024 each),
8175 * then A0's share of the cpu resource is:
8176 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02008177 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02008178 *
Yong Zhang07e06b02011-01-07 15:17:36 +08008179 * We achieve this by letting root_task_group's tasks sit
8180 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02008181 */
Yong Zhang07e06b02011-01-07 15:17:36 +08008182 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02008183#endif /* CONFIG_FAIR_GROUP_SCHED */
8184
8185 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008186#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008187 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08008188 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008189#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008190
Ingo Molnardd41f592007-07-09 18:51:59 +02008191 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
8192 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07008193
8194 rq->last_load_update_tick = jiffies;
8195
Linus Torvalds1da177e2005-04-16 15:20:36 -07008196#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07008197 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01008198 rq->rd = NULL;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02008199 rq->cpu_power = SCHED_LOAD_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04008200 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008201 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02008202 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008203 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07008204 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04008205 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01008206 rq->idle_stamp = 0;
8207 rq->avg_idle = 2*sysctl_sched_migration_cost;
Gregory Haskinsdc938522008-01-25 21:08:26 +01008208 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008209#ifdef CONFIG_NO_HZ
8210 rq->nohz_balance_kick = 0;
8211 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
8212#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07008213#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008214 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008215 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008216 }
8217
Peter Williams2dd73a42006-06-27 02:54:34 -07008218 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008219
Avi Kivitye107be32007-07-26 13:40:43 +02008220#ifdef CONFIG_PREEMPT_NOTIFIERS
8221 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
8222#endif
8223
Christoph Lameterc9819f42006-12-10 02:20:25 -08008224#ifdef CONFIG_SMP
Carlos R. Mafra962cf362008-05-15 11:15:37 -03008225 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
Christoph Lameterc9819f42006-12-10 02:20:25 -08008226#endif
8227
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008228#ifdef CONFIG_RT_MUTEXES
Thomas Gleixner1d615482009-11-17 14:54:03 +01008229 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07008230#endif
8231
Linus Torvalds1da177e2005-04-16 15:20:36 -07008232 /*
8233 * The boot idle thread does lazy MMU switching as well:
8234 */
8235 atomic_inc(&init_mm.mm_count);
8236 enter_lazy_tlb(&init_mm, current);
8237
8238 /*
8239 * Make us the idle thread. Technically, schedule() should not be
8240 * called from this thread, however somewhere below it might be,
8241 * but because we are the idle thread, we just pick up running again
8242 * when this runqueue becomes "idle".
8243 */
8244 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02008245
8246 calc_load_update = jiffies + LOAD_FREQ;
8247
Ingo Molnardd41f592007-07-09 18:51:59 +02008248 /*
8249 * During early bootup we pretend to be a normal task:
8250 */
8251 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01008252
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308253 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
Rusty Russell49557e62009-11-02 20:37:20 +10308254 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308255#ifdef CONFIG_SMP
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308256#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008257 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
8258 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
8259 atomic_set(&nohz.load_balancer, nr_cpu_ids);
8260 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
8261 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
Rusty Russell7d1e6a92008-11-25 02:35:09 +10308262#endif
Rusty Russellbdddd292009-12-02 14:09:16 +10308263 /* May be allocated at isolcpus cmdline parse time */
8264 if (cpu_isolated_map == NULL)
8265 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Rusty Russellbf4d83f2008-11-25 09:57:51 +10308266#endif /* SMP */
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10308267
Ingo Molnar6892b752008-02-13 14:02:36 +01008268 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008269}
8270
8271#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008272static inline int preempt_count_equals(int preempt_offset)
8273{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01008274 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008275
Arnd Bergmann4ba82162011-01-25 22:52:22 +01008276 return (nested == preempt_offset);
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008277}
8278
Simon Kagstromd8948372009-12-23 11:08:18 +01008279void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008280{
Ingo Molnar48f24c42006-07-03 00:25:40 -07008281#ifdef in_atomic
Linus Torvalds1da177e2005-04-16 15:20:36 -07008282 static unsigned long prev_jiffy; /* ratelimiting */
8283
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02008284 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8285 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02008286 return;
8287 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8288 return;
8289 prev_jiffy = jiffies;
8290
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01008291 printk(KERN_ERR
8292 "BUG: sleeping function called from invalid context at %s:%d\n",
8293 file, line);
8294 printk(KERN_ERR
8295 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8296 in_atomic(), irqs_disabled(),
8297 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02008298
8299 debug_show_held_locks(current);
8300 if (irqs_disabled())
8301 print_irqtrace_events(current);
8302 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008303#endif
8304}
8305EXPORT_SYMBOL(__might_sleep);
8306#endif
8307
8308#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008309static void normalize_task(struct rq *rq, struct task_struct *p)
8310{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008311 const struct sched_class *prev_class = p->sched_class;
8312 int old_prio = p->prio;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008313 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02008314
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008315 on_rq = p->se.on_rq;
8316 if (on_rq)
8317 deactivate_task(rq, p, 0);
8318 __setscheduler(rq, p, SCHED_NORMAL, 0);
8319 if (on_rq) {
8320 activate_task(rq, p, 0);
8321 resched_task(rq->curr);
8322 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008323
8324 check_class_changed(rq, p, prev_class, old_prio);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008325}
8326
Linus Torvalds1da177e2005-04-16 15:20:36 -07008327void normalize_rt_tasks(void)
8328{
Ingo Molnara0f98a12007-06-17 18:37:45 +02008329 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008330 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07008331 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008332
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008333 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008334 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02008335 /*
8336 * Only normalize user tasks:
8337 */
8338 if (!p->mm)
8339 continue;
8340
Ingo Molnardd41f592007-07-09 18:51:59 +02008341 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008342#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03008343 p->se.statistics.wait_start = 0;
8344 p->se.statistics.sleep_start = 0;
8345 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02008346#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02008347
8348 if (!rt_task(p)) {
8349 /*
8350 * Renice negative nice level userspace
8351 * tasks back to 0:
8352 */
8353 if (TASK_NICE(p) < 0 && p->mm)
8354 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008355 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02008356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008357
Thomas Gleixner1d615482009-11-17 14:54:03 +01008358 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07008359 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008360
Ingo Molnar178be792007-10-15 17:00:18 +02008361 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02008362
Ingo Molnarb29739f2006-06-27 02:54:51 -07008363 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01008364 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02008365 } while_each_thread(g, p);
8366
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01008367 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008368}
8369
8370#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07008371
Jason Wessel67fc4e02010-05-20 21:04:21 -05008372#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008373/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05008374 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07008375 *
8376 * They can only be called when the whole system has been
8377 * stopped - every CPU needs to be quiescent, and no scheduling
8378 * activity can take place. Using them for anything else would
8379 * be a serious bug, and as a result, they aren't even visible
8380 * under any other configuration.
8381 */
8382
8383/**
8384 * curr_task - return the current task for a given cpu.
8385 * @cpu: the processor in question.
8386 *
8387 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8388 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008389struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008390{
8391 return cpu_curr(cpu);
8392}
8393
Jason Wessel67fc4e02010-05-20 21:04:21 -05008394#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8395
8396#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07008397/**
8398 * set_curr_task - set the current task for a given cpu.
8399 * @cpu: the processor in question.
8400 * @p: the task pointer to set.
8401 *
8402 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01008403 * are serviced on a separate stack. It allows the architecture to switch the
8404 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07008405 * must be called with all CPU's synchronized, and interrupts disabled, the
8406 * and caller must save the original value of the current task (see
8407 * curr_task() above) and restore that value before reenabling interrupts and
8408 * re-starting the system.
8409 *
8410 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8411 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07008412void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07008413{
8414 cpu_curr(cpu) = p;
8415}
8416
8417#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008418
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008419#ifdef CONFIG_FAIR_GROUP_SCHED
8420static void free_fair_sched_group(struct task_group *tg)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008421{
8422 int i;
8423
8424 for_each_possible_cpu(i) {
8425 if (tg->cfs_rq)
8426 kfree(tg->cfs_rq[i]);
8427 if (tg->se)
8428 kfree(tg->se[i]);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008429 }
8430
8431 kfree(tg->cfs_rq);
8432 kfree(tg->se);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008433}
8434
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008435static
8436int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008437{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008438 struct cfs_rq *cfs_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008439 struct sched_entity *se;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008440 struct rq *rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008441 int i;
8442
Mike Travis434d53b2008-04-04 18:11:04 -07008443 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008444 if (!tg->cfs_rq)
8445 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008446 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008447 if (!tg->se)
8448 goto err;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008449
8450 tg->shares = NICE_0_LOAD;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008451
8452 for_each_possible_cpu(i) {
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008453 rq = cpu_rq(i);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008454
Li Zefaneab17222008-10-29 17:03:22 +08008455 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8456 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008457 if (!cfs_rq)
8458 goto err;
8459
Li Zefaneab17222008-10-29 17:03:22 +08008460 se = kzalloc_node(sizeof(struct sched_entity),
8461 GFP_KERNEL, cpu_to_node(i));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008462 if (!se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008463 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008464
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008465 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008466 }
8467
8468 return 1;
8469
Peter Zijlstra49246272010-10-17 21:46:10 +02008470err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008471 kfree(cfs_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008472err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008473 return 0;
8474}
8475
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008476static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8477{
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008478 struct rq *rq = cpu_rq(cpu);
8479 unsigned long flags;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008480
8481 /*
8482 * Only empty task groups can be destroyed; so we can speculatively
8483 * check on_list without danger of it being re-added.
8484 */
8485 if (!tg->cfs_rq[cpu]->on_list)
8486 return;
8487
8488 raw_spin_lock_irqsave(&rq->lock, flags);
Paul Turner822bc182010-11-29 16:55:40 -08008489 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008490 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008491}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008492#else /* !CONFG_FAIR_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008493static inline void free_fair_sched_group(struct task_group *tg)
8494{
8495}
8496
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008497static inline
8498int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008499{
8500 return 1;
8501}
8502
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008503static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8504{
8505}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008506#endif /* CONFIG_FAIR_GROUP_SCHED */
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008507
8508#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008509static void free_rt_sched_group(struct task_group *tg)
8510{
8511 int i;
8512
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008513 destroy_rt_bandwidth(&tg->rt_bandwidth);
8514
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008515 for_each_possible_cpu(i) {
8516 if (tg->rt_rq)
8517 kfree(tg->rt_rq[i]);
8518 if (tg->rt_se)
8519 kfree(tg->rt_se[i]);
8520 }
8521
8522 kfree(tg->rt_rq);
8523 kfree(tg->rt_se);
8524}
8525
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008526static
8527int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008528{
8529 struct rt_rq *rt_rq;
Li Zefaneab17222008-10-29 17:03:22 +08008530 struct sched_rt_entity *rt_se;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008531 struct rq *rq;
8532 int i;
8533
Mike Travis434d53b2008-04-04 18:11:04 -07008534 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008535 if (!tg->rt_rq)
8536 goto err;
Mike Travis434d53b2008-04-04 18:11:04 -07008537 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008538 if (!tg->rt_se)
8539 goto err;
8540
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008541 init_rt_bandwidth(&tg->rt_bandwidth,
8542 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008543
8544 for_each_possible_cpu(i) {
8545 rq = cpu_rq(i);
8546
Li Zefaneab17222008-10-29 17:03:22 +08008547 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8548 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008549 if (!rt_rq)
8550 goto err;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008551
Li Zefaneab17222008-10-29 17:03:22 +08008552 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8553 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008554 if (!rt_se)
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008555 goto err_free_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008556
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008557 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008558 }
8559
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008560 return 1;
8561
Peter Zijlstra49246272010-10-17 21:46:10 +02008562err_free_rq:
Phil Carmodydfc12eb2009-12-10 14:29:37 +02008563 kfree(rt_rq);
Peter Zijlstra49246272010-10-17 21:46:10 +02008564err:
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008565 return 0;
8566}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008567#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008568static inline void free_rt_sched_group(struct task_group *tg)
8569{
8570}
8571
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008572static inline
8573int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008574{
8575 return 1;
8576}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008577#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008578
Dhaval Giani7c941432010-01-20 13:26:18 +01008579#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008580static void free_sched_group(struct task_group *tg)
8581{
8582 free_fair_sched_group(tg);
8583 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01008584 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008585 kfree(tg);
8586}
8587
8588/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008589struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008590{
8591 struct task_group *tg;
8592 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008593
8594 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8595 if (!tg)
8596 return ERR_PTR(-ENOMEM);
8597
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008598 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008599 goto err;
8600
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008601 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008602 goto err;
8603
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008604 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008605 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008606
8607 WARN_ON(!parent); /* root should already exist */
8608
8609 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008610 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242030-08-14 15:56:40 +08008611 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008612 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008613
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008614 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008615
8616err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008617 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008618 return ERR_PTR(-ENOMEM);
8619}
8620
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008621/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008622static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008623{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008624 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008625 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008626}
8627
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008628/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008629void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008630{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008631 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008632 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008633
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008634 /* end participation in shares distribution */
8635 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01008636 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08008637
8638 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008639 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02008640 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008641 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008642
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008643 /* wait for possible concurrent references to cfs_rqs complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008644 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008645}
8646
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008647/* change task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02008648 * The caller of this function should have put the task in its new group
8649 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8650 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008651 */
8652void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008653{
8654 int on_rq, running;
8655 unsigned long flags;
8656 struct rq *rq;
8657
8658 rq = task_rq_lock(tsk, &flags);
8659
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01008660 running = task_current(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008661 on_rq = tsk->se.on_rq;
8662
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008663 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008664 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008665 if (unlikely(running))
8666 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008667
Peter Zijlstra810b3812008-02-29 15:21:01 -05008668#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008669 if (tsk->sched_class->task_move_group)
8670 tsk->sched_class->task_move_group(tsk, on_rq);
8671 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05008672#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008673 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05008674
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07008675 if (unlikely(running))
8676 tsk->sched_class->set_curr_task(rq);
8677 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01008678 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008679
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008680 task_rq_unlock(rq, &flags);
8681}
Dhaval Giani7c941432010-01-20 13:26:18 +01008682#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008683
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008684#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008685static DEFINE_MUTEX(shares_mutex);
8686
Ingo Molnar4cf86d72007-10-15 17:00:14 +02008687int sched_group_set_shares(struct task_group *tg, unsigned long shares)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008688{
8689 int i;
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008690 unsigned long flags;
Ingo Molnarc61935f2008-01-22 11:24:58 +01008691
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008692 /*
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02008693 * We can't change the weight of the root cgroup.
8694 */
8695 if (!tg->se[0])
8696 return -EINVAL;
8697
Peter Zijlstra18d95a22008-04-19 19:45:00 +02008698 if (shares < MIN_SHARES)
8699 shares = MIN_SHARES;
Miao Xiecb4ad1f2008-04-28 12:54:56 +08008700 else if (shares > MAX_SHARES)
8701 shares = MAX_SHARES;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01008702
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008703 mutex_lock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008704 if (tg->shares == shares)
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008705 goto done;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008706
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008707 tg->shares = shares;
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008708 for_each_possible_cpu(i) {
Paul Turner94371782010-11-15 15:47:10 -08008709 struct rq *rq = cpu_rq(i);
8710 struct sched_entity *se;
8711
8712 se = tg->se[i];
8713 /* Propagate contribution to hierarchy */
8714 raw_spin_lock_irqsave(&rq->lock, flags);
8715 for_each_sched_entity(se)
Paul Turner6d5ab292011-01-21 20:45:01 -08008716 update_cfs_shares(group_cfs_rq(se));
Paul Turner94371782010-11-15 15:47:10 -08008717 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02008718 }
Srivatsa Vaddagiri6b2d7702008-01-25 21:08:00 +01008719
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008720done:
Peter Zijlstra8ed36992008-02-13 15:45:39 +01008721 mutex_unlock(&shares_mutex);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02008722 return 0;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02008723}
8724
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008725unsigned long sched_group_shares(struct task_group *tg)
8726{
8727 return tg->shares;
8728}
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008729#endif
Dhaval Giani5cb350b2007-10-15 17:00:14 +02008730
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01008731#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008732/*
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008733 * Ensure that the real time constraints are schedulable.
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008734 */
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008735static DEFINE_MUTEX(rt_constraints_mutex);
8736
8737static unsigned long to_ratio(u64 period, u64 runtime)
8738{
8739 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008740 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008741
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008742 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008743}
8744
Dhaval Giani521f1a242008-02-28 15:21:56 +05308745/* Must be called with tasklist_lock held */
8746static inline int tg_has_rt_tasks(struct task_group *tg)
8747{
8748 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008749
Dhaval Giani521f1a242008-02-28 15:21:56 +05308750 do_each_thread(g, p) {
8751 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8752 return 1;
8753 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008754
Dhaval Giani521f1a242008-02-28 15:21:56 +05308755 return 0;
8756}
8757
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008758struct rt_schedulable_data {
8759 struct task_group *tg;
8760 u64 rt_period;
8761 u64 rt_runtime;
8762};
8763
8764static int tg_schedulable(struct task_group *tg, void *data)
8765{
8766 struct rt_schedulable_data *d = data;
8767 struct task_group *child;
8768 unsigned long total, sum = 0;
8769 u64 period, runtime;
8770
8771 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8772 runtime = tg->rt_bandwidth.rt_runtime;
8773
8774 if (tg == d->tg) {
8775 period = d->rt_period;
8776 runtime = d->rt_runtime;
8777 }
8778
Peter Zijlstra4653f802008-09-23 15:33:44 +02008779 /*
8780 * Cannot have more runtime than the period.
8781 */
8782 if (runtime > period && runtime != RUNTIME_INF)
8783 return -EINVAL;
8784
8785 /*
8786 * Ensure we don't starve existing RT tasks.
8787 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008788 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8789 return -EBUSY;
8790
8791 total = to_ratio(period, runtime);
8792
Peter Zijlstra4653f802008-09-23 15:33:44 +02008793 /*
8794 * Nobody can have more than the global setting allows.
8795 */
8796 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8797 return -EINVAL;
8798
8799 /*
8800 * The sum of our children's runtime should not exceed our own.
8801 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008802 list_for_each_entry_rcu(child, &tg->children, siblings) {
8803 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8804 runtime = child->rt_bandwidth.rt_runtime;
8805
8806 if (child == d->tg) {
8807 period = d->rt_period;
8808 runtime = d->rt_runtime;
8809 }
8810
8811 sum += to_ratio(period, runtime);
8812 }
8813
8814 if (sum > total)
8815 return -EINVAL;
8816
8817 return 0;
8818}
8819
8820static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8821{
8822 struct rt_schedulable_data data = {
8823 .tg = tg,
8824 .rt_period = period,
8825 .rt_runtime = runtime,
8826 };
8827
8828 return walk_tg_tree(tg_schedulable, tg_nop, &data);
8829}
8830
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008831static int tg_set_bandwidth(struct task_group *tg,
8832 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008833{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008834 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008835
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008836 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +05308837 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008838 err = __rt_schedulable(tg, rt_period, rt_runtime);
8839 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +05308840 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008841
Thomas Gleixner0986b112009-11-17 15:32:06 +01008842 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008843 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8844 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008845
8846 for_each_possible_cpu(i) {
8847 struct rt_rq *rt_rq = tg->rt_rq[i];
8848
Thomas Gleixner0986b112009-11-17 15:32:06 +01008849 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008850 rt_rq->rt_runtime = rt_runtime;
Thomas Gleixner0986b112009-11-17 15:32:06 +01008851 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008852 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008853 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra49246272010-10-17 21:46:10 +02008854unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +05308855 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008856 mutex_unlock(&rt_constraints_mutex);
8857
8858 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01008859}
8860
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008861int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8862{
8863 u64 rt_runtime, rt_period;
8864
8865 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8866 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8867 if (rt_runtime_us < 0)
8868 rt_runtime = RUNTIME_INF;
8869
8870 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8871}
8872
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008873long sched_group_rt_runtime(struct task_group *tg)
8874{
8875 u64 rt_runtime_us;
8876
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008877 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008878 return -1;
8879
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008880 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01008881 do_div(rt_runtime_us, NSEC_PER_USEC);
8882 return rt_runtime_us;
8883}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008884
8885int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8886{
8887 u64 rt_runtime, rt_period;
8888
8889 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8890 rt_runtime = tg->rt_bandwidth.rt_runtime;
8891
Raistlin619b0482008-06-26 18:54:09 +02008892 if (rt_period == 0)
8893 return -EINVAL;
8894
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008895 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8896}
8897
8898long sched_group_rt_period(struct task_group *tg)
8899{
8900 u64 rt_period_us;
8901
8902 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8903 do_div(rt_period_us, NSEC_PER_USEC);
8904 return rt_period_us;
8905}
8906
8907static int sched_rt_global_constraints(void)
8908{
Peter Zijlstra4653f802008-09-23 15:33:44 +02008909 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008910 int ret = 0;
8911
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008912 if (sysctl_sched_rt_period <= 0)
8913 return -EINVAL;
8914
Peter Zijlstra4653f802008-09-23 15:33:44 +02008915 runtime = global_rt_runtime();
8916 period = global_rt_period();
8917
8918 /*
8919 * Sanity check on the sysctl variables.
8920 */
8921 if (runtime > period && runtime != RUNTIME_INF)
8922 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +02008923
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008924 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008925 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +02008926 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02008927 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008928 mutex_unlock(&rt_constraints_mutex);
8929
8930 return ret;
8931}
Dhaval Giani54e99122009-02-27 15:13:54 +05308932
8933int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8934{
8935 /* Don't accept realtime tasks when there is no way for them to run */
8936 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8937 return 0;
8938
8939 return 1;
8940}
8941
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008942#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008943static int sched_rt_global_constraints(void)
8944{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008945 unsigned long flags;
8946 int i;
8947
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07008948 if (sysctl_sched_rt_period <= 0)
8949 return -EINVAL;
8950
Peter Zijlstra60aa6052009-05-05 17:50:21 +02008951 /*
8952 * There's always some RT tasks in the root group
8953 * -- migration, kstopmachine etc..
8954 */
8955 if (sysctl_sched_rt_runtime == 0)
8956 return -EBUSY;
8957
Thomas Gleixner0986b112009-11-17 15:32:06 +01008958 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008959 for_each_possible_cpu(i) {
8960 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8961
Thomas Gleixner0986b112009-11-17 15:32:06 +01008962 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008963 rt_rq->rt_runtime = global_rt_runtime();
Thomas Gleixner0986b112009-11-17 15:32:06 +01008964 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008965 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01008966 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02008967
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008968 return 0;
8969}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02008970#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008971
8972int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008973 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008974 loff_t *ppos)
8975{
8976 int ret;
8977 int old_period, old_runtime;
8978 static DEFINE_MUTEX(mutex);
8979
8980 mutex_lock(&mutex);
8981 old_period = sysctl_sched_rt_period;
8982 old_runtime = sysctl_sched_rt_runtime;
8983
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07008984 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02008985
8986 if (!ret && write) {
8987 ret = sched_rt_global_constraints();
8988 if (ret) {
8989 sysctl_sched_rt_period = old_period;
8990 sysctl_sched_rt_runtime = old_runtime;
8991 } else {
8992 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8993 def_rt_bandwidth.rt_period =
8994 ns_to_ktime(global_rt_period());
8995 }
8996 }
8997 mutex_unlock(&mutex);
8998
8999 return ret;
9000}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009001
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009002#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009003
9004/* return corresponding task_group object of a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02009005static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009006{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009007 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
9008 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009009}
9010
9011static struct cgroup_subsys_state *
Paul Menage2b01dfe2007-10-24 18:23:50 +02009012cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009013{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009014 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009015
Paul Menage2b01dfe2007-10-24 18:23:50 +02009016 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009017 /* This is early initialization for the top cgroup */
Yong Zhang07e06b02011-01-07 15:17:36 +08009018 return &root_task_group.css;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009019 }
9020
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02009021 parent = cgroup_tg(cgrp->parent);
9022 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009023 if (IS_ERR(tg))
9024 return ERR_PTR(-ENOMEM);
9025
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009026 return &tg->css;
9027}
9028
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009029static void
9030cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009031{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009032 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009033
9034 sched_destroy_group(tg);
9035}
9036
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009037static int
Ben Blumbe367d02009-09-23 15:56:31 -07009038cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009039{
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009040#ifdef CONFIG_RT_GROUP_SCHED
Dhaval Giani54e99122009-02-27 15:13:54 +05309041 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009042 return -EINVAL;
9043#else
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009044 /* We don't support RT-tasks being in separate groups */
9045 if (tsk->sched_class != &fair_sched_class)
9046 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01009047#endif
Ben Blumbe367d02009-09-23 15:56:31 -07009048 return 0;
9049}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009050
Ben Blumbe367d02009-09-23 15:56:31 -07009051static int
9052cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
9053 struct task_struct *tsk, bool threadgroup)
9054{
9055 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
9056 if (retval)
9057 return retval;
9058 if (threadgroup) {
9059 struct task_struct *c;
9060 rcu_read_lock();
9061 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9062 retval = cpu_cgroup_can_attach_task(cgrp, c);
9063 if (retval) {
9064 rcu_read_unlock();
9065 return retval;
9066 }
9067 }
9068 rcu_read_unlock();
9069 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009070 return 0;
9071}
9072
9073static void
Paul Menage2b01dfe2007-10-24 18:23:50 +02009074cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
Ben Blumbe367d02009-09-23 15:56:31 -07009075 struct cgroup *old_cont, struct task_struct *tsk,
9076 bool threadgroup)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009077{
9078 sched_move_task(tsk);
Ben Blumbe367d02009-09-23 15:56:31 -07009079 if (threadgroup) {
9080 struct task_struct *c;
9081 rcu_read_lock();
9082 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
9083 sched_move_task(c);
9084 }
9085 rcu_read_unlock();
9086 }
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009087}
9088
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009089static void
Peter Zijlstrad41d5a02011-02-07 17:02:20 +01009090cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
9091 struct cgroup *old_cgrp, struct task_struct *task)
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009092{
9093 /*
9094 * cgroup_exit() is called in the copy_process() failure path.
9095 * Ignore this case since the task hasn't ran yet, this avoids
9096 * trying to poke a half freed task state from generic code.
9097 */
9098 if (!(task->flags & PF_EXITING))
9099 return;
9100
9101 sched_move_task(task);
9102}
9103
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009104#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -07009105static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +02009106 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009107{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009108 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009109}
9110
Paul Menagef4c753b2008-04-29 00:59:56 -07009111static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009112{
Paul Menage2b01dfe2007-10-24 18:23:50 +02009113 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009114
9115 return (u64) tg->shares;
9116}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009117#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009118
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009119#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -07009120static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -07009121 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009122{
Paul Menage06ecb272008-04-29 01:00:06 -07009123 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009124}
9125
Paul Menage06ecb272008-04-29 01:00:06 -07009126static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009127{
Paul Menage06ecb272008-04-29 01:00:06 -07009128 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009129}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009130
9131static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
9132 u64 rt_period_us)
9133{
9134 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
9135}
9136
9137static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
9138{
9139 return sched_group_rt_period(cgroup_tg(cgrp));
9140}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02009141#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009142
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009143static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009144#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009145 {
9146 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -07009147 .read_u64 = cpu_shares_read_u64,
9148 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009149 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009150#endif
9151#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009152 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01009153 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -07009154 .read_s64 = cpu_rt_runtime_read,
9155 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01009156 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009157 {
9158 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -07009159 .read_u64 = cpu_rt_period_read_uint,
9160 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02009161 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009162#endif
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009163};
9164
9165static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
9166{
Paul Menagefe5c7cc2007-10-29 21:18:11 +01009167 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009168}
9169
9170struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01009171 .name = "cpu",
9172 .create = cpu_cgroup_create,
9173 .destroy = cpu_cgroup_destroy,
9174 .can_attach = cpu_cgroup_can_attach,
9175 .attach = cpu_cgroup_attach,
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01009176 .exit = cpu_cgroup_exit,
Ingo Molnar38605ca2007-10-29 21:18:11 +01009177 .populate = cpu_cgroup_populate,
9178 .subsys_id = cpu_cgroup_subsys_id,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07009179 .early_init = 1,
9180};
9181
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01009182#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009183
9184#ifdef CONFIG_CGROUP_CPUACCT
9185
9186/*
9187 * CPU accounting code for task groups.
9188 *
9189 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
9190 * (balbir@in.ibm.com).
9191 */
9192
Bharata B Rao934352f2008-11-10 20:41:13 +05309193/* track cpu usage of a group of tasks and its child groups */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009194struct cpuacct {
9195 struct cgroup_subsys_state css;
9196 /* cpuusage holds pointer to a u64-type object on every cpu */
Tejun Heo43cf38e2010-02-02 14:38:57 +09009197 u64 __percpu *cpuusage;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309198 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
Bharata B Rao934352f2008-11-10 20:41:13 +05309199 struct cpuacct *parent;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009200};
9201
9202struct cgroup_subsys cpuacct_subsys;
9203
9204/* return cpu accounting group corresponding to this container */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309205static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009206{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309207 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009208 struct cpuacct, css);
9209}
9210
9211/* return cpu accounting group to which this task belongs */
9212static inline struct cpuacct *task_ca(struct task_struct *tsk)
9213{
9214 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
9215 struct cpuacct, css);
9216}
9217
9218/* create a new cpu accounting group */
9219static struct cgroup_subsys_state *cpuacct_create(
Dhaval Giani32cd7562008-02-29 10:02:43 +05309220 struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009221{
9222 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309223 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009224
9225 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +05309226 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009227
9228 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309229 if (!ca->cpuusage)
9230 goto out_free_ca;
9231
9232 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9233 if (percpu_counter_init(&ca->cpustat[i], 0))
9234 goto out_free_counters;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009235
Bharata B Rao934352f2008-11-10 20:41:13 +05309236 if (cgrp->parent)
9237 ca->parent = cgroup_ca(cgrp->parent);
9238
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009239 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309240
9241out_free_counters:
9242 while (--i >= 0)
9243 percpu_counter_destroy(&ca->cpustat[i]);
9244 free_percpu(ca->cpuusage);
9245out_free_ca:
9246 kfree(ca);
9247out:
9248 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009249}
9250
9251/* destroy an existing cpu accounting group */
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01009252static void
Dhaval Giani32cd7562008-02-29 10:02:43 +05309253cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009254{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309255 struct cpuacct *ca = cgroup_ca(cgrp);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309256 int i;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009257
Bharata B Raoef12fef2009-03-31 10:02:22 +05309258 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
9259 percpu_counter_destroy(&ca->cpustat[i]);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009260 free_percpu(ca->cpuusage);
9261 kfree(ca);
9262}
9263
Ken Chen720f5492008-12-15 22:02:01 -08009264static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9265{
Rusty Russellb36128c2009-02-20 16:29:08 +09009266 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009267 u64 data;
9268
9269#ifndef CONFIG_64BIT
9270 /*
9271 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
9272 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009273 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009274 data = *cpuusage;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009275 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009276#else
9277 data = *cpuusage;
9278#endif
9279
9280 return data;
9281}
9282
9283static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9284{
Rusty Russellb36128c2009-02-20 16:29:08 +09009285 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08009286
9287#ifndef CONFIG_64BIT
9288 /*
9289 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
9290 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009291 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009292 *cpuusage = val;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01009293 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08009294#else
9295 *cpuusage = val;
9296#endif
9297}
9298
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009299/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +05309300static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009301{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309302 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009303 u64 totalcpuusage = 0;
9304 int i;
9305
Ken Chen720f5492008-12-15 22:02:01 -08009306 for_each_present_cpu(i)
9307 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009308
9309 return totalcpuusage;
9310}
9311
Dhaval Giani0297b802008-02-29 10:02:44 +05309312static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
9313 u64 reset)
9314{
9315 struct cpuacct *ca = cgroup_ca(cgrp);
9316 int err = 0;
9317 int i;
9318
9319 if (reset) {
9320 err = -EINVAL;
9321 goto out;
9322 }
9323
Ken Chen720f5492008-12-15 22:02:01 -08009324 for_each_present_cpu(i)
9325 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +05309326
Dhaval Giani0297b802008-02-29 10:02:44 +05309327out:
9328 return err;
9329}
9330
Ken Chene9515c32008-12-15 22:04:15 -08009331static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9332 struct seq_file *m)
9333{
9334 struct cpuacct *ca = cgroup_ca(cgroup);
9335 u64 percpu;
9336 int i;
9337
9338 for_each_present_cpu(i) {
9339 percpu = cpuacct_cpuusage_read(ca, i);
9340 seq_printf(m, "%llu ", (unsigned long long) percpu);
9341 }
9342 seq_printf(m, "\n");
9343 return 0;
9344}
9345
Bharata B Raoef12fef2009-03-31 10:02:22 +05309346static const char *cpuacct_stat_desc[] = {
9347 [CPUACCT_STAT_USER] = "user",
9348 [CPUACCT_STAT_SYSTEM] = "system",
9349};
9350
9351static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9352 struct cgroup_map_cb *cb)
9353{
9354 struct cpuacct *ca = cgroup_ca(cgrp);
9355 int i;
9356
9357 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9358 s64 val = percpu_counter_read(&ca->cpustat[i]);
9359 val = cputime64_to_clock_t(val);
9360 cb->fill(cb, cpuacct_stat_desc[i], val);
9361 }
9362 return 0;
9363}
9364
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009365static struct cftype files[] = {
9366 {
9367 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -07009368 .read_u64 = cpuusage_read,
9369 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009370 },
Ken Chene9515c32008-12-15 22:04:15 -08009371 {
9372 .name = "usage_percpu",
9373 .read_seq_string = cpuacct_percpu_seq_read,
9374 },
Bharata B Raoef12fef2009-03-31 10:02:22 +05309375 {
9376 .name = "stat",
9377 .read_map = cpuacct_stats_show,
9378 },
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009379};
9380
Dhaval Giani32cd7562008-02-29 10:02:43 +05309381static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009382{
Dhaval Giani32cd7562008-02-29 10:02:43 +05309383 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009384}
9385
9386/*
9387 * charge this task's execution time to its accounting group.
9388 *
9389 * called with rq->lock held.
9390 */
9391static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9392{
9393 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +05309394 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009395
Li Zefanc40c6f82009-02-26 15:40:15 +08009396 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009397 return;
9398
Bharata B Rao934352f2008-11-10 20:41:13 +05309399 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309400
9401 rcu_read_lock();
9402
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009403 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009404
Bharata B Rao934352f2008-11-10 20:41:13 +05309405 for (; ca; ca = ca->parent) {
Rusty Russellb36128c2009-02-20 16:29:08 +09009406 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009407 *cpuusage += cputime;
9408 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +05309409
9410 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009411}
9412
Bharata B Raoef12fef2009-03-31 10:02:22 +05309413/*
Anton Blanchardfa535a72010-02-02 14:46:13 -08009414 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9415 * in cputime_t units. As a result, cpuacct_update_stats calls
9416 * percpu_counter_add with values large enough to always overflow the
9417 * per cpu batch limit causing bad SMP scalability.
9418 *
9419 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9420 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9421 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9422 */
9423#ifdef CONFIG_SMP
9424#define CPUACCT_BATCH \
9425 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9426#else
9427#define CPUACCT_BATCH 0
9428#endif
9429
9430/*
Bharata B Raoef12fef2009-03-31 10:02:22 +05309431 * Charge the system/user time to the task's accounting group.
9432 */
9433static void cpuacct_update_stats(struct task_struct *tsk,
9434 enum cpuacct_stat_index idx, cputime_t val)
9435{
9436 struct cpuacct *ca;
Anton Blanchardfa535a72010-02-02 14:46:13 -08009437 int batch = CPUACCT_BATCH;
Bharata B Raoef12fef2009-03-31 10:02:22 +05309438
9439 if (unlikely(!cpuacct_subsys.active))
9440 return;
9441
9442 rcu_read_lock();
9443 ca = task_ca(tsk);
9444
9445 do {
Anton Blanchardfa535a72010-02-02 14:46:13 -08009446 __percpu_counter_add(&ca->cpustat[idx], val, batch);
Bharata B Raoef12fef2009-03-31 10:02:22 +05309447 ca = ca->parent;
9448 } while (ca);
9449 rcu_read_unlock();
9450}
9451
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01009452struct cgroup_subsys cpuacct_subsys = {
9453 .name = "cpuacct",
9454 .create = cpuacct_create,
9455 .destroy = cpuacct_destroy,
9456 .populate = cpuacct_populate,
9457 .subsys_id = cpuacct_subsys_id,
9458};
9459#endif /* CONFIG_CGROUP_CPUACCT */
Paul E. McKenney03b042b2009-06-25 09:08:16 -07009460