/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_sched.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
        unsigned long delta;
        ktime_t soft, hard, now;

        for (;;) {
                if (hrtimer_active(period_timer))
                        break;

                now = hrtimer_cb_get_time(period_timer);
                hrtimer_forward(period_timer, now, period);

                soft = hrtimer_get_softexpires(period_timer);
                hard = hrtimer_get_expires(period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(period_timer, soft, delta,
                                         HRTIMER_MODE_ABS_PINNED, 0);
        }
}

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
        s64 delta;

        if (rq->skip_clock_update > 0)
                return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
        0;

#undef SCHED_FEAT
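
/*
 * A minimal sketch of how the SCHED_FEAT() X-macro above expands,
 * assuming features.h contains entries such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) (the feature name here is
 * illustrative; see features.h for the real list):
 *
 *      const_debug unsigned int sysctl_sched_features =
 *              (1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *              ...
 *              0;
 *
 * Each enabled feature contributes its bit to the mask, and the
 * trailing 0 terminates the OR chain.
 */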

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        if (static_key_enabled(&sched_feat_keys[i]))
                static_key_slow_dec(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        if (!static_key_enabled(&sched_feat_keys[i]))
                static_key_slow_inc(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg) {
                                sysctl_sched_features &= ~(1UL << i);
                                sched_feat_disable(i);
                        } else {
                                sysctl_sched_features |= (1UL << i);
                                sched_feat_enable(i);
                        }
                        break;
                }
        }

        if (i == __SCHED_FEAT_NR)
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        return 0;
}
late_initcall(sched_init_debug);
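
/*
 * A sketch of driving this knob from userspace, assuming debugfs is
 * mounted at /sys/kernel/debug and CONFIG_SCHED_DEBUG is set:
 *
 *      cat /sys/kernel/debug/sched_features
 *      echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * sched_feat_write() strips a leading "NO_" and clears the matching
 * bit; writing the bare feature name sets it. (GENTLE_FAIR_SLEEPERS
 * is an illustrative feature name.)
 */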
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;



/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

        lockdep_assert_held(&p->pi_lock);

        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
        }
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(p->pi_lock)
        __acquires(rq->lock)
{
        struct rq *rq;

        for (;;) {
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
        }
}

static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
        __releases(rq->lock)
        __releases(p->pi_lock)
{
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
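
/*
 * A minimal usage sketch for the pair above (hypothetical caller):
 *
 *      unsigned long flags;
 *      struct rq *rq = task_rq_lock(p, &flags);
 *      ... inspect or modify p's scheduling state ...
 *      task_rq_unlock(rq, p, &flags);
 *
 * The retry loop matters: p can migrate between reading task_rq(p)
 * and acquiring rq->lock, so the rq is only returned once
 * rq == task_rq(p) has been re-verified under the lock.
 */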

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        raw_spin_lock(&rq->lock);

        return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

static void hrtick_clear(struct rq *rq)
{
        if (hrtimer_active(&rq->hrtick_timer))
                hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
        struct rq *rq = container_of(timer, struct rq, hrtick_timer);

        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
        struct rq *rq = arg;

        raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

        hrtimer_set_expires(timer, time);

        if (rq == this_rq()) {
                hrtimer_restart(timer);
        } else if (!rq->hrtick_csd_pending) {
                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                rq->hrtick_csd_pending = 1;
        }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                hrtick_clear(cpu_rq(cpu));
                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
        hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
        rq->hrtick_csd_pending = 0;

        rq->hrtick_csd.flags = 0;
        rq->hrtick_csd.func = __hrtick_start;
        rq->hrtick_csd.info = rq;
#endif

        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

void resched_task(struct task_struct *p)
{
        int cpu;

        assert_raw_spin_locked(&task_rq(p)->lock);

        if (test_tsk_need_resched(p))
                return;

        set_tsk_need_resched(p);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi-idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu would add more delay to the timers than intended
 * (as that cpu's timer base may not be up to date with respect to
 * jiffies etc.).
 */
int get_nohz_timer_target(void)
{
        int cpu = smp_processor_id();
        int i;
        struct sched_domain *sd;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
                        if (!idle_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
unlock:
        rcu_read_unlock();
        return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);

        if (cpu == smp_processor_id())
                return;

        /*
         * This is safe, as this function is called with the timer
         * wheel base lock of (cpu) held. When the CPU is on the way
         * to idle and has not yet set rq->curr to idle then it will
         * be serialized on the timer wheel base lock and take the new
         * timer into account automatically.
         */
        if (rq->curr != rq->idle)
                return;

        /*
         * We can set TIF_RESCHED on the idle task of the other CPU
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */
        set_tsk_need_resched(rq->idle);

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(rq->idle))
                smp_send_reschedule(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
        int cpu = smp_processor_id();
        return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
}

#else /* CONFIG_NO_HZ */

static inline bool got_nohz_idle_kick(void)
{
        return false;
}

#endif /* CONFIG_NO_HZ */

void sched_avg_update(struct rq *rq)
{
        s64 period = sched_avg_period();

        while ((s64)(rq->clock - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (rq->age_stamp));
                rq->age_stamp += period;
                rq->rt_avg /= 2;
        }
}
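
/*
 * The loop above is a geometric decay: after k elapsed periods with no
 * new RT activity, rt_avg has been halved k times, i.e.
 *
 *      rt_avg(k) = rt_avg(0) / 2^k
 *
 * so stale RT time stops influencing its consumers fairly quickly.
 */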

#else /* !CONFIG_SMP */
void resched_task(struct task_struct *p)
{
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
                        (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
                             tg_visitor down, tg_visitor up, void *data)
{
        struct task_group *parent, *child;
        int ret;

        parent = from;

down:
        ret = (*down)(parent, data);
        if (ret)
                goto out;
        list_for_each_entry_rcu(child, &parent->children, siblings) {
                parent = child;
                goto down;

up:
                continue;
        }
        ret = (*up)(parent, data);
        if (ret || parent == from)
                goto out;

        child = parent;
        parent = parent->parent;
        if (parent)
                goto up;
out:
        return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
        return 0;
}
#endif
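
/*
 * A minimal sketch of a visitor for walk_tg_tree_from(); tg_count and
 * the surrounding snippet are hypothetical, tg_nop() above is the
 * provided no-op callback:
 *
 *      static int tg_count(struct task_group *tg, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;       returning nonzero aborts the walk
 *      }
 *
 *      int n = 0;
 *      rcu_read_lock();
 *      walk_tg_tree_from(&root_task_group, tg_count, tg_nop, &n);
 *      rcu_read_unlock();
 */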

static void set_load_weight(struct task_struct *p)
{
        int prio = p->static_prio - MAX_RT_PRIO;
        struct load_weight *load = &p->se.load;

        /*
         * SCHED_IDLE tasks get minimal weight:
         */
        if (p->policy == SCHED_IDLE) {
                load->weight = scale_load(WEIGHT_IDLEPRIO);
                load->inv_weight = WMULT_IDLEPRIO;
                return;
        }

        load->weight = scale_load(prio_to_weight[prio]);
        load->inv_weight = prio_to_wmult[prio];
}
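
/*
 * Worked example: a nice-0 task has static_prio == 120, so prio ==
 * 120 - MAX_RT_PRIO == 20 and load->weight == scale_load(1024), the
 * reference weight. Each nice step scales the weight by roughly 1.25x
 * (see the prio_to_weight[] table), so nice -1 is ~1277 and nice +1
 * is ~820.
 */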

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
        update_rq_clock(rq);
        sched_info_queued(p);
        p->sched_class->enqueue_task(rq, p, flags);
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
        update_rq_clock(rq);
        sched_info_dequeued(p);
        p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;

        enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;

        dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
        irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

        /*
         * Since irq_time is only updated on {soft,}irq_exit, we might run into
         * this case when a previous update_rq_clock() happened inside a
         * {soft,}irq region.
         *
         * When this happens, we stop ->clock_task and only update the
         * prev_irq_time stamp to account for the part that fit, so that a next
         * update will consume the rest. This ensures ->clock_task is
         * monotonic.
         *
         * It does however cause some slight miss-attribution of {soft,}irq
         * time, a more accurate solution would be to update the irq_time using
         * the current rq->clock timestamp, except that would require using
         * atomic ops.
         */
        if (irq_delta > delta)
                irq_delta = delta;

        rq->prev_irq_time += irq_delta;
        delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
                u64 st;

                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;

                if (unlikely(steal > delta))
                        steal = delta;

                st = steal_ticks(steal);
                steal = st * TICK_NSEC;

                rq->prev_steal_time_rq += steal;

                delta -= steal;
        }
#endif

        rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
        if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
                sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct task_struct *old_stop = cpu_rq(cpu)->stop;

        if (stop) {
                /*
                 * Make it appear like a SCHED_FIFO task, it's something
                 * userspace knows about and won't get confused about.
                 *
                 * Also, it will make PI more or less work without too
                 * much confusion -- but then, stop work should not
                 * rely on PI working anyway.
                 */
                sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

                stop->sched_class = &stop_sched_class;
        }

        cpu_rq(cpu)->stop = stop;

        if (old_stop) {
                /*
                 * Reset it back to a normal scheduling class so that
                 * it can die in pieces.
                 */
                old_stop->sched_class = &rt_sched_class;
        }
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
        return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
        int prio;

        if (task_has_rt_policy(p))
                prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                prio = __normal_prio(p);
        return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
        p->normal_prio = normal_prio(p);
        /*
         * If we are RT tasks or we were boosted to RT priority,
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
        if (!rt_prio(p->prio))
                return p->normal_prio;
        return p->prio;
}
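
/*
 * Worked example of the mapping above: an RT task with rt_priority 50
 * gets prio = MAX_RT_PRIO-1 - 50 = 49 (lower means more important),
 * while a non-RT nice-0 task keeps its static_prio of 120. A task
 * boosted via PI keeps the boosted p->prio until the boost is undone.
 */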

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
{
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);
                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio)
                p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        const struct sched_class *class;

        if (p->sched_class == rq->curr->sched_class) {
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
        } else {
                for_each_class(class) {
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
                                resched_task(rq->curr);
                                break;
                        }
                }
        }

        /*
         * A queue event has occurred, and we're going to schedule.  In
         * this case, we can save a useless back to back clock update.
         */
        if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
                rq->skip_clock_update = 1;
}

#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
        /*
         * We should never call set_task_cpu() on a blocked task,
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));

#ifdef CONFIG_LOCKDEP
        /*
         * The caller should hold either p->pi_lock or rq->lock, when changing
         * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
         *
         * sched_move_task() holds both and thus holding either pins the cgroup,
         * see task_group().
         *
         * Furthermore, all task_rq users should acquire both locks, see
         * task_rq_lock().
         */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

        trace_sched_migrate_task(p, new_cpu);

        if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
        }

        __set_task_cpu(p, new_cpu);
}

struct migration_arg {
        struct task_struct *task;
        int dest_cpu;
};

static int migration_cpu_stop(void *data);

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        unsigned long flags;
        int running, on_rq;
        unsigned long ncsw;
        struct rq *rq;

        for (;;) {
                /*
                 * We do the initial early heuristics without holding
                 * any task-queue locks at all. We'll only try to get
                 * the runqueue lock when things look like they will
                 * work out!
                 */
                rq = task_rq(p);

                /*
                 * If the task is actively running on another CPU
                 * still, just relax and busy-wait without holding
                 * any locks.
                 *
                 * NOTE! Since we don't hold any locks, it's not
                 * even sure that "rq" stays as the right runqueue!
                 * But we don't care, since "task_running()" will
                 * return false if the runqueue has changed and p
                 * is actually now running somewhere else!
                 */
                while (task_running(rq, p)) {
                        if (match_state && unlikely(p->state != match_state))
                                return 0;
                        cpu_relax();
                }

                /*
                 * Ok, time to look more closely! We need the rq
                 * lock now, to be *sure*. If we're wrong, we'll
                 * just go back and repeat.
                 */
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
                on_rq = p->on_rq;
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
                task_rq_unlock(rq, p, &flags);

                /*
                 * If it changed from the expected state, bail out now.
                 */
                if (unlikely(!ncsw))
                        break;

                /*
                 * Was it really running after all now that we
                 * checked with the proper locks actually held?
                 *
                 * Oops. Go back and try again..
                 */
                if (unlikely(running)) {
                        cpu_relax();
                        continue;
                }

                /*
                 * It's not enough that it's not actively running,
                 * it must be off the runqueue _entirely_, and not
                 * preempted!
                 *
                 * So if it was still runnable (but just not actively
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
                if (unlikely(on_rq)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&to, HRTIMER_MODE_REL);
                        continue;
                }

                /*
                 * Ahh, all good. It wasn't running, and it wasn't
                 * runnable, which means that it will never become
                 * running in the future either. We're all done!
                 */
                break;
        }

        return ncsw;
}
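
/*
 * A sketch of the two-call pattern the comment above describes
 * (hypothetical caller, e.g. a ptrace-style user):
 *
 *      unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *      ...
 *      if (ncsw && wait_task_inactive(p, TASK_TRACED) == ncsw)
 *              p provably stayed off the CPU between the two calls
 *
 * The MSB set via LONG_MIN guarantees a nonzero "success" cookie even
 * when p->nvcsw itself is 0.
 */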

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
        int cpu;

        preempt_disable();
        cpu = task_cpu(p);
        if ((cpu != smp_processor_id()) && task_curr(p))
                smp_send_reschedule(cpu);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
        enum { cpuset, possible, fail } state = cpuset;
        int dest_cpu;

        /* Look for allowed, online CPU in same node. */
        for_each_cpu(dest_cpu, nodemask) {
                if (!cpu_online(dest_cpu))
                        continue;
                if (!cpu_active(dest_cpu))
                        continue;
                if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                        return dest_cpu;
        }

        for (;;) {
                /* Any allowed, online CPU? */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
                                continue;
                        goto out;
                }

                switch (state) {
                case cpuset:
                        /* No more Mr. Nice Guy. */
                        cpuset_cpus_allowed_fallback(p);
                        state = possible;
                        break;

                case possible:
                        do_set_cpus_allowed(p, cpu_possible_mask);
                        state = fail;
                        break;

                case fail:
                        BUG();
                        break;
                }
        }

out:
        if (state != cpuset) {
                /*
                 * Don't tell them about moving exiting tasks or
                 * kernel threads (both mm NULL), since they never
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
                        printk_sched("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }

        return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
{
        int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

        /*
         * In order not to call set_task_cpu() on a blocking task we need
         * to rely on ttwu() to place the task on a valid ->cpus_allowed
         * cpu.
         *
         * Since this is common to all placement strategies, this lives here.
         *
         * [ this allows ->select_task() to simply return task_cpu(p) and
         *   not worry about this generic constraint ]
         */
        if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
                     !cpu_online(cpu)))
                cpu = select_fallback_rq(task_cpu(p), p);

        return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}
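
/*
 * update_avg() is an exponentially weighted moving average with a
 * shift of 3, i.e.
 *
 *      avg <- avg + (sample - avg)/8 = 7/8 of avg + 1/8 of sample
 *
 * Worked example: avg = 800, sample = 1600 gives avg = 800 + 100 = 900.
 */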
#endif

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
        struct rq *rq = this_rq();

#ifdef CONFIG_SMP
        int this_cpu = smp_processor_id();

        if (cpu == this_cpu) {
                schedstat_inc(rq, ttwu_local);
                schedstat_inc(p, se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;

                schedstat_inc(p, se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(this_cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                                schedstat_inc(sd, ttwu_wake_remote);
                                break;
                        }
                }
                rcu_read_unlock();
        }

        if (wake_flags & WF_MIGRATED)
                schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

        schedstat_inc(rq, ttwu_count);
        schedstat_inc(p, se.statistics.nr_wakeups);

        if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
        activate_task(rq, p, en_flags);
        p->on_rq = 1;

        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
        trace_sched_wakeup(p, true);
        check_preempt_curr(rq, p, wake_flags);

        p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
        if (p->sched_class->task_woken)
                p->sched_class->task_woken(rq, p);

        if (rq->idle_stamp) {
                u64 delta = rq->clock - rq->idle_stamp;
                u64 max = 2*sysctl_sched_migration_cost;

                if (delta > max)
                        rq->avg_idle = max;
                else
                        update_avg(&rq->avg_idle, delta);
                rq->idle_stamp = 0;
        }
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
#ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
#endif

        ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
        ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called when the task @p isn't fully descheduled from its runqueue;
 * in this case we must do a remote wakeup. It's a 'light' wakeup
 * though, since all we need to do is flip p->state to TASK_RUNNING:
 * the task is still ->on_rq.
 */
1294static int ttwu_remote(struct task_struct *p, int wake_flags)
1295{
1296 struct rq *rq;
1297 int ret = 0;
1298
1299 rq = __task_rq_lock(p);
1300 if (p->on_rq) {
1301 ttwu_do_wakeup(rq, p, wake_flags);
1302 ret = 1;
1303 }
1304 __task_rq_unlock(rq);
1305
1306 return ret;
1307}
1308
Peter Zijlstra317f3942011-04-05 17:23:58 +02001309#ifdef CONFIG_SMP
Peter Zijlstrafa14ff42011-09-12 13:06:17 +02001310static void sched_ttwu_pending(void)
Peter Zijlstra317f3942011-04-05 17:23:58 +02001311{
1312 struct rq *rq = this_rq();
Peter Zijlstrafa14ff42011-09-12 13:06:17 +02001313 struct llist_node *llist = llist_del_all(&rq->wake_list);
1314 struct task_struct *p;
Peter Zijlstra317f3942011-04-05 17:23:58 +02001315
1316 raw_spin_lock(&rq->lock);
1317
Peter Zijlstrafa14ff42011-09-12 13:06:17 +02001318 while (llist) {
1319 p = llist_entry(llist, struct task_struct, wake_entry);
1320 llist = llist_next(llist);
Peter Zijlstra317f3942011-04-05 17:23:58 +02001321 ttwu_do_activate(rq, p, 0);
1322 }
1323
1324 raw_spin_unlock(&rq->lock);
1325}
1326
1327void scheduler_ipi(void)
1328{
Suresh Siddhaca380622011-10-03 15:09:00 -07001329 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
Peter Zijlstrac5d753a2011-07-19 15:07:25 -07001330 return;
1331
1332 /*
1333 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1334 * traditionally all their work was done from the interrupt return
1335 * path. Now that we actually do some work, we need to make sure
1336 * we do call them.
1337 *
1338 * Some archs already do call them, luckily irq_enter/exit nest
1339 * properly.
1340 *
1341 * Arguably we should visit all archs and update all handlers,
1342 * however a fair share of IPIs are still resched-only, so this would
1343 * somewhat pessimize the simple resched case.
1344 */
1345 irq_enter();
Peter Zijlstrafa14ff42011-09-12 13:06:17 +02001346 sched_ttwu_pending();
Suresh Siddhaca380622011-10-03 15:09:00 -07001347
1348 /*
1349 * Check if someone kicked us for doing the nohz idle load balance.
1350 */
Suresh Siddha6eb57e02011-10-03 15:09:01 -07001351 if (unlikely(got_nohz_idle_kick() && !need_resched())) {
1352 this_rq()->idle_balance = 1;
Suresh Siddhaca380622011-10-03 15:09:00 -07001353 raise_softirq_irqoff(SCHED_SOFTIRQ);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07001354 }
Peter Zijlstrac5d753a2011-07-19 15:07:25 -07001355 irq_exit();
Peter Zijlstra317f3942011-04-05 17:23:58 +02001356}
1357
1358static void ttwu_queue_remote(struct task_struct *p, int cpu)
1359{
Peter Zijlstrafa14ff42011-09-12 13:06:17 +02001360 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
Peter Zijlstra317f3942011-04-05 17:23:58 +02001361 smp_send_reschedule(cpu);
1362}
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02001363
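/*
 * A note on the llist idiom used above (a sketch, assuming the generic
 * <linux/llist.h> API; 'item', 'list' and kick_consumer() are hypothetical
 * names): llist_add() returns true only when the list was empty before the
 * add, so exactly one waker per batch pays for the IPI, and the consumer
 * drains the whole batch lock-free:
 *
 *	// producer (any CPU):
 *	if (llist_add(&item->node, &list))
 *		kick_consumer();		// e.g. smp_send_reschedule()
 *
 *	// consumer, e.g. from scheduler_ipi():
 *	struct llist_node *node = llist_del_all(&list);
 *	// walk 'node' via llist_next() without holding any lock
 */
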
Peter Zijlstra39be3502012-01-26 12:44:34 +01001364bool cpus_share_cache(int this_cpu, int that_cpu)
Peter Zijlstra518cd622011-12-07 15:07:31 +01001365{
1366 return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
1367}
Peter Zijlstrad6aa8f82011-05-26 14:21:33 +02001368#endif /* CONFIG_SMP */
Peter Zijlstra317f3942011-04-05 17:23:58 +02001369
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001370static void ttwu_queue(struct task_struct *p, int cpu)
1371{
1372 struct rq *rq = cpu_rq(cpu);
1373
Daniel Hellstrom17d9f312011-05-20 04:01:10 +00001374#if defined(CONFIG_SMP)
Peter Zijlstra39be3502012-01-26 12:44:34 +01001375 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
Peter Zijlstraf01114c2011-05-31 12:26:55 +02001376 sched_clock_cpu(cpu); /* sync clocks x-cpu */
Peter Zijlstra317f3942011-04-05 17:23:58 +02001377 ttwu_queue_remote(p, cpu);
1378 return;
1379 }
1380#endif
1381
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001382 raw_spin_lock(&rq->lock);
1383 ttwu_do_activate(rq, p, 0);
1384 raw_spin_unlock(&rq->lock);
Tejun Heo9ed38112009-12-03 15:08:03 +09001385}
1386
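/*
 * A note on the sched_feat(TTWU_QUEUE) test above: scheduler feature bits
 * are compile-time constants unless CONFIG_SCHED_DEBUG is set, in which
 * case they can be flipped at runtime through debugfs, e.g. by writing
 * NO_TTWU_QUEUE to /sys/kernel/debug/sched_features. That makes it easy
 * to compare remote-queueing (IPI) cost against cross-cpu rq->lock
 * contention on a given machine.
 */
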
1387/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 * try_to_wake_up - wake up a thread
Tejun Heo9ed38112009-12-03 15:08:03 +09001389 * @p: the thread to be awakened
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 * @state: the mask of task states that can be woken
Tejun Heo9ed38112009-12-03 15:08:03 +09001391 * @wake_flags: wake modifier flags (WF_*)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 *
1393 * Put it on the run-queue if it's not already there. The "current"
1394 * thread is always on the run-queue (except when the actual
1395 * re-schedule is in progress), and as such you're allowed to do
1396 * the simpler "current->state = TASK_RUNNING" to mark yourself
1397 * runnable without the overhead of this.
1398 *
Tejun Heo9ed38112009-12-03 15:08:03 +09001399 * Returns %true if @p was woken up, %false if it was already running
1400 * or @state didn't match @p's state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 */
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02001402static int
1403try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 unsigned long flags;
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001406 int cpu, success = 0;
Peter Zijlstra2398f2c2008-06-27 13:41:35 +02001407
Linus Torvalds04e2f172008-02-23 18:05:03 -08001408 smp_wmb();
Peter Zijlstra013fdb82011-04-05 17:23:45 +02001409 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02001410 if (!(p->state & state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 goto out;
1412
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001413 success = 1; /* we're going to change ->state */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 cpu = task_cpu(p);
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02001415
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001416 if (p->on_rq && ttwu_remote(p, wake_flags))
1417 goto stat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418
1419#ifdef CONFIG_SMP
Peter Zijlstrae9c84312009-09-15 14:43:03 +02001420 /*
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001421 * If the owning (remote) cpu is still in the middle of schedule() with
1422 * this task as prev, wait until it's done referencing the task.
Peter Zijlstrae9c84312009-09-15 14:43:03 +02001423 */
Peter Zijlstraf3e94782012-09-12 11:22:00 +02001424 while (p->on_cpu)
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02001425 cpu_relax();
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02001426 /*
1427 * Pairs with the smp_wmb() in finish_lock_switch().
1428 */
1429 smp_rmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
Peter Zijlstraa8e4f2e2011-04-05 17:23:49 +02001431 p->sched_contributes_to_load = !!task_contributes_to_load(p);
Peter Zijlstrae9c84312009-09-15 14:43:03 +02001432 p->state = TASK_WAKING;
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001433
Peter Zijlstrae4a52bc2011-04-05 17:23:54 +02001434 if (p->sched_class->task_waking)
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02001435 p->sched_class->task_waking(p);
Peter Zijlstraab19cb22009-11-27 15:44:43 +01001436
Peter Zijlstra7608dec2011-04-05 17:23:46 +02001437 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
Peter Zijlstraf339b9d2011-05-31 10:49:20 +02001438 if (task_cpu(p) != cpu) {
1439 wake_flags |= WF_MIGRATED;
Mike Galbraithf5dc3752009-10-09 08:35:03 +02001440 set_task_cpu(p, cpu);
Peter Zijlstraf339b9d2011-05-31 10:49:20 +02001441 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442#endif /* CONFIG_SMP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
Peter Zijlstrac05fbaf2011-04-05 17:23:57 +02001444 ttwu_queue(p, cpu);
1445stat:
Peter Zijlstrab84cb5d2011-04-05 17:23:55 +02001446 ttwu_stat(p, cpu, wake_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447out:
Peter Zijlstra013fdb82011-04-05 17:23:45 +02001448 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
1450 return success;
1451}
1452
David Howells50fa6102009-04-28 15:01:38 +01001453/**
Tejun Heo21aa9af2010-06-08 21:40:37 +02001454 * try_to_wake_up_local - try to wake up a local task with rq lock held
1455 * @p: the thread to be awakened
1456 *
Peter Zijlstra2acca552011-04-05 17:23:50 +02001457 * Put @p on the run-queue if it's not already there. The caller must
Tejun Heo21aa9af2010-06-08 21:40:37 +02001458 * ensure that this_rq() is locked, @p is bound to this_rq() and not
Peter Zijlstra2acca552011-04-05 17:23:50 +02001459 * the current task.
Tejun Heo21aa9af2010-06-08 21:40:37 +02001460 */
1461static void try_to_wake_up_local(struct task_struct *p)
1462{
1463 struct rq *rq = task_rq(p);
Tejun Heo21aa9af2010-06-08 21:40:37 +02001464
1465 BUG_ON(rq != this_rq());
1466 BUG_ON(p == current);
1467 lockdep_assert_held(&rq->lock);
1468
Peter Zijlstra2acca552011-04-05 17:23:50 +02001469 if (!raw_spin_trylock(&p->pi_lock)) {
1470 raw_spin_unlock(&rq->lock);
1471 raw_spin_lock(&p->pi_lock);
1472 raw_spin_lock(&rq->lock);
Tejun Heo21aa9af2010-06-08 21:40:37 +02001473 }
Peter Zijlstra2acca552011-04-05 17:23:50 +02001474
Tejun Heo21aa9af2010-06-08 21:40:37 +02001475 if (!(p->state & TASK_NORMAL))
Peter Zijlstra2acca552011-04-05 17:23:50 +02001476 goto out;
Tejun Heo21aa9af2010-06-08 21:40:37 +02001477
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001478 if (!p->on_rq)
Peter Zijlstrad7c01d22011-04-05 17:23:43 +02001479 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1480
Peter Zijlstra23f41ee2011-04-05 17:23:56 +02001481 ttwu_do_wakeup(rq, p, 0);
Peter Zijlstrab84cb5d2011-04-05 17:23:55 +02001482 ttwu_stat(p, smp_processor_id(), 0);
Peter Zijlstra2acca552011-04-05 17:23:50 +02001483out:
1484 raw_spin_unlock(&p->pi_lock);
Tejun Heo21aa9af2010-06-08 21:40:37 +02001485}
1486
1487/**
David Howells50fa6102009-04-28 15:01:38 +01001488 * wake_up_process - Wake up a specific process
1489 * @p: The process to be woken up.
1490 *
1491 * Attempt to wake up the nominated process and move it to the set of runnable
1492 * processes. Returns 1 if the process was woken up, 0 if it was already
1493 * running.
1494 *
1495 * It may be assumed that this function implies a write memory barrier before
1496 * changing the task state if and only if any tasks are woken up.
1497 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001498int wake_up_process(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499{
Matthew Wilcoxd9514f62007-12-06 11:07:07 -05001500 return try_to_wake_up(p, TASK_ALL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502EXPORT_SYMBOL(wake_up_process);
1503
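/*
 * Illustrative sketch (not part of this file): the canonical way to pair
 * wake_up_process() with a sleeper. 'cond' and 'sleeper_task' are
 * hypothetical names; the point is that the sleeper sets its state
 * *before* testing the condition, so a concurrent wakeup either finds
 * TASK_RUNNING (and is a harmless no-op) or observes the sleeping state
 * after 'cond' became visible and wakes the task:
 *
 *	// sleeper:
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 *	// waker:
 *	cond = true;
 *	wake_up_process(sleeper_task);	// implies the barrier noted above
 */
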
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08001504int wake_up_state(struct task_struct *p, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505{
1506 return try_to_wake_up(p, state, 0);
1507}
1508
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509/*
1510 * Perform scheduler related setup for a newly forked process p.
1511 * p is forked by current.
Ingo Molnardd41f592007-07-09 18:51:59 +02001512 *
1513 * __sched_fork() is basic setup used by init_idle() too:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001515static void __sched_fork(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001517 p->on_rq = 0;
1518
1519 p->se.on_rq = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02001520 p->se.exec_start = 0;
1521 p->se.sum_exec_runtime = 0;
Ingo Molnarf6cf8912007-08-28 12:53:24 +02001522 p->se.prev_sum_exec_runtime = 0;
Ingo Molnar6c594c22008-12-14 12:34:15 +01001523 p->se.nr_migrations = 0;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001524 p->se.vruntime = 0;
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001525 INIT_LIST_HEAD(&p->se.group_node);
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02001526
1527#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03001528 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02001529#endif
Nick Piggin476d1392005-06-25 14:57:29 -07001530
Peter Zijlstrafa717062008-01-25 21:08:27 +01001531 INIT_LIST_HEAD(&p->rt.run_list);
Nick Piggin476d1392005-06-25 14:57:29 -07001532
Avi Kivitye107be32007-07-26 13:40:43 +02001533#ifdef CONFIG_PREEMPT_NOTIFIERS
1534 INIT_HLIST_HEAD(&p->preempt_notifiers);
1535#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02001536}
1537
1538/*
1539 * fork()/clone()-time setup:
1540 */
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02001541void sched_fork(struct task_struct *p)
Ingo Molnardd41f592007-07-09 18:51:59 +02001542{
Peter Zijlstra0122ec52011-04-05 17:23:51 +02001543 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02001544 int cpu = get_cpu();
1545
1546 __sched_fork(p);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01001547 /*
Peter Zijlstra0017d732010-03-24 18:34:10 +01001548 * We mark the process as running here. This guarantees that
Peter Zijlstra06b83b52009-12-16 18:04:35 +01001549 * nobody will actually run it, and a signal or other external
1550 * event cannot wake it up and insert it on the runqueue either.
1551 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01001552 p->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02001553
Ingo Molnarb29739f2006-06-27 02:54:51 -07001554 /*
Mike Galbraithc350a042011-07-27 17:14:55 +02001555 * Make sure we do not leak PI boosting priority to the child.
1556 */
1557 p->prio = current->normal_prio;
1558
1559 /*
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02001560 * Revert to default priority/policy on fork if requested.
1561 */
1562 if (unlikely(p->sched_reset_on_fork)) {
Mike Galbraithc350a042011-07-27 17:14:55 +02001563 if (task_has_rt_policy(p)) {
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02001564 p->policy = SCHED_NORMAL;
Mike Galbraith6c697bd2009-06-17 10:48:02 +02001565 p->static_prio = NICE_TO_PRIO(0);
Mike Galbraithc350a042011-07-27 17:14:55 +02001566 p->rt_priority = 0;
1567 } else if (PRIO_TO_NICE(p->static_prio) < 0)
1568 p->static_prio = NICE_TO_PRIO(0);
1569
1570 p->prio = p->normal_prio = __normal_prio(p);
1571 set_load_weight(p);
Mike Galbraith6c697bd2009-06-17 10:48:02 +02001572
Mike Galbraithb9dc29e2009-06-17 10:46:01 +02001573 /*
1574 * We don't need the reset flag anymore after the fork. It has
1575 * fulfilled its duty:
1576 */
1577 p->sched_reset_on_fork = 0;
1578 }
Lennart Poetteringca94c442009-06-15 17:17:47 +02001579
Hiroshi Shimamoto2ddbf952007-10-15 17:00:11 +02001580 if (!rt_prio(p->prio))
1581 p->sched_class = &fair_sched_class;
Ingo Molnarb29739f2006-06-27 02:54:51 -07001582
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001583 if (p->sched_class->task_fork)
1584 p->sched_class->task_fork(p);
1585
Peter Zijlstra86951592010-06-22 11:44:53 +02001586 /*
1587 * The child is not yet in the pid-hash so no cgroup attach races,
1588 * and the cgroup is pinned to this child because cgroup_fork()
1589 * is run before sched_fork().
1590 *
1591 * Silence PROVE_RCU.
1592 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02001593 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02001594 set_task_cpu(p, cpu);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02001595 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Peter Zijlstra5f3edc12009-09-10 13:42:00 +02001596
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07001597#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
Ingo Molnardd41f592007-07-09 18:51:59 +02001598 if (likely(sched_info_on()))
Chandra Seetharaman52f17b62006-07-14 00:24:38 -07001599 memset(&p->sched_info, 0, sizeof(p->sched_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600#endif
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02001601#if defined(CONFIG_SMP)
1602 p->on_cpu = 0;
Nick Piggin4866cde2005-06-25 14:57:23 -07001603#endif
Frederic Weisbeckerbdd4e852011-06-08 01:13:27 +02001604#ifdef CONFIG_PREEMPT_COUNT
Nick Piggin4866cde2005-06-25 14:57:23 -07001605 /* Want to start with kernel preemption disabled. */
Al Viroa1261f52005-11-13 16:06:55 -08001606 task_thread_info(p)->preempt_count = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607#endif
Dario Faggioli806c09a2010-11-30 19:51:33 +01001608#ifdef CONFIG_SMP
Gregory Haskins917b6272008-12-29 09:39:53 -05001609 plist_node_init(&p->pushable_tasks, MAX_PRIO);
Dario Faggioli806c09a2010-11-30 19:51:33 +01001610#endif
Gregory Haskins917b6272008-12-29 09:39:53 -05001611
Nick Piggin476d1392005-06-25 14:57:29 -07001612 put_cpu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613}
1614
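/*
 * Illustrative sketch (userspace, not part of this file): the
 * p->sched_reset_on_fork handling above is what services the
 * SCHED_RESET_ON_FORK flag, which lets a real-time task request that its
 * children start over as ordinary SCHED_NORMAL, nice-0 tasks (the flag
 * may need <linux/sched.h> depending on the libc headers):
 *
 *	#include <sched.h>
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *	// children created by fork() from here on revert to SCHED_NORMAL
 */
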
1615/*
1616 * wake_up_new_task - wake up a newly created task for the first time.
1617 *
1618 * This function will do some initial scheduler statistics housekeeping
1619 * that must be done for every newly created context, then puts the task
1620 * on the runqueue and wakes it.
1621 */
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02001622void wake_up_new_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623{
1624 unsigned long flags;
Ingo Molnardd41f592007-07-09 18:51:59 +02001625 struct rq *rq;
Peter Zijlstrafabf3182010-01-21 21:04:57 +01001626
Peter Zijlstraab2515c2011-04-05 17:23:52 +02001627 raw_spin_lock_irqsave(&p->pi_lock, flags);
Peter Zijlstrafabf3182010-01-21 21:04:57 +01001628#ifdef CONFIG_SMP
1629 /*
1630 * Fork balancing, do it here and not earlier because:
1631 * - cpus_allowed can change in the fork path
1632 * - any previously selected cpu might disappear through hotplug
Peter Zijlstrafabf3182010-01-21 21:04:57 +01001633 */
Peter Zijlstraab2515c2011-04-05 17:23:52 +02001634 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
Peter Zijlstrafabf3182010-01-21 21:04:57 +01001635#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Peter Zijlstraab2515c2011-04-05 17:23:52 +02001637 rq = __task_rq_lock(p);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01001638 activate_task(rq, p, 0);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001639 p->on_rq = 1;
Peter Zijlstra89363382011-04-05 17:23:42 +02001640 trace_sched_wakeup_new(p, true);
Peter Zijlstraa7558e02009-09-14 20:02:34 +02001641 check_preempt_curr(rq, p, WF_FORK);
Steven Rostedt9a897c52008-01-25 21:08:22 +01001642#ifdef CONFIG_SMP
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001643 if (p->sched_class->task_woken)
1644 p->sched_class->task_woken(rq, p);
Steven Rostedt9a897c52008-01-25 21:08:22 +01001645#endif
Peter Zijlstra0122ec52011-04-05 17:23:51 +02001646 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647}
1648
Avi Kivitye107be32007-07-26 13:40:43 +02001649#ifdef CONFIG_PREEMPT_NOTIFIERS
1650
1651/**
Luis Henriques80dd99b2009-03-16 19:58:09 +00001652 * preempt_notifier_register - tell me when current is being preempted & rescheduled
Randy Dunlap421cee22007-07-31 00:37:50 -07001653 * @notifier: notifier struct to register
Avi Kivitye107be32007-07-26 13:40:43 +02001654 */
1655void preempt_notifier_register(struct preempt_notifier *notifier)
1656{
1657 hlist_add_head(&notifier->link, &current->preempt_notifiers);
1658}
1659EXPORT_SYMBOL_GPL(preempt_notifier_register);
1660
1661/**
1662 * preempt_notifier_unregister - no longer interested in preemption notifications
Randy Dunlap421cee22007-07-31 00:37:50 -07001663 * @notifier: notifier struct to unregister
Avi Kivitye107be32007-07-26 13:40:43 +02001664 *
1665 * This is safe to call from within a preemption notifier.
1666 */
1667void preempt_notifier_unregister(struct preempt_notifier *notifier)
1668{
1669 hlist_del(&notifier->link);
1670}
1671EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1672
1673static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1674{
1675 struct preempt_notifier *notifier;
1676 struct hlist_node *node;
1677
1678 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1679 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1680}
1681
1682static void
1683fire_sched_out_preempt_notifiers(struct task_struct *curr,
1684 struct task_struct *next)
1685{
1686 struct preempt_notifier *notifier;
1687 struct hlist_node *node;
1688
1689 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1690 notifier->ops->sched_out(notifier, next);
1691}
1692
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001693#else /* !CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02001694
1695static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1696{
1697}
1698
1699static void
1700fire_sched_out_preempt_notifiers(struct task_struct *curr,
1701 struct task_struct *next)
1702{
1703}
1704
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02001705#endif /* CONFIG_PREEMPT_NOTIFIERS */
Avi Kivitye107be32007-07-26 13:40:43 +02001706
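/*
 * Illustrative sketch (not part of this file): a preempt-notifier user in
 * the style of KVM, which uses this hook to unload/reload virtual-CPU
 * state around preemption. The my_* names are hypothetical:
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		// we are running again, now on 'cpu'
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		// we are about to be descheduled in favour of 'next'
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	struct preempt_notifier pn;
 *
 *	preempt_notifier_init(&pn, &my_preempt_ops);
 *	preempt_notifier_register(&pn);	// registers for 'current' only
 */
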
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707/**
Nick Piggin4866cde2005-06-25 14:57:23 -07001708 * prepare_task_switch - prepare to switch tasks
1709 * @rq: the runqueue preparing to switch
Randy Dunlap421cee22007-07-31 00:37:50 -07001710 * @prev: the current task that is being switched out
Nick Piggin4866cde2005-06-25 14:57:23 -07001711 * @next: the task we are going to switch to.
1712 *
1713 * This is called with the rq lock held and interrupts off. It must
1714 * be paired with a subsequent finish_task_switch after the context
1715 * switch.
1716 *
1717 * prepare_task_switch sets up locking and calls architecture specific
1718 * hooks.
1719 */
Avi Kivitye107be32007-07-26 13:40:43 +02001720static inline void
1721prepare_task_switch(struct rq *rq, struct task_struct *prev,
1722 struct task_struct *next)
Nick Piggin4866cde2005-06-25 14:57:23 -07001723{
Andrew Vagin895dd922012-07-12 14:14:29 +04001724 trace_sched_switch(prev, next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001725 sched_info_switch(prev, next);
1726 perf_event_task_sched_out(prev, next);
Avi Kivitye107be32007-07-26 13:40:43 +02001727 fire_sched_out_preempt_notifiers(prev, next);
Nick Piggin4866cde2005-06-25 14:57:23 -07001728 prepare_lock_switch(rq, next);
1729 prepare_arch_switch(next);
1730}
1731
1732/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 * finish_task_switch - clean up after a task-switch
Jeff Garzik344baba2005-09-07 01:15:17 -04001734 * @rq: runqueue associated with task-switch
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 * @prev: the thread we just switched away from.
1736 *
Nick Piggin4866cde2005-06-25 14:57:23 -07001737 * finish_task_switch must be called after the context switch, paired
1738 * with a prepare_task_switch call before the context switch.
1739 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1740 * and do any other architecture-specific cleanup actions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 *
1742 * Note that we may have delayed dropping an mm in context_switch(). If
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01001743 * so, we finish that here outside of the runqueue lock. (Doing it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 * with the lock held can cause deadlocks; see schedule() for
1745 * details.)
1746 */
Alexey Dobriyana9957442007-10-15 17:00:13 +02001747static void finish_task_switch(struct rq *rq, struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 __releases(rq->lock)
1749{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 struct mm_struct *mm = rq->prev_mm;
Oleg Nesterov55a101f2006-09-29 02:01:10 -07001751 long prev_state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
1753 rq->prev_mm = NULL;
1754
1755 /*
1756 * A task struct has one reference for the use as "current".
Oleg Nesterovc394cc92006-09-29 02:01:11 -07001757 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
Oleg Nesterov55a101f2006-09-29 02:01:10 -07001758 * schedule one last time. The schedule call will never return, and
1759 * the scheduled task must drop that reference.
Oleg Nesterovc394cc92006-09-29 02:01:11 -07001760 * The test for TASK_DEAD must occur while the runqueue locks are
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 * still held, otherwise prev could be scheduled on another cpu, die
1762 * there before we look at prev->state, and then the reference would
1763 * be dropped twice.
1764 * Manfred Spraul <manfred@colorfullife.com>
1765 */
Oleg Nesterov55a101f2006-09-29 02:01:10 -07001766 prev_state = prev->state;
Frederic Weisbeckerbaa36042012-06-18 17:54:14 +02001767 account_switch_vtime(prev);
Nick Piggin4866cde2005-06-25 14:57:23 -07001768 finish_arch_switch(prev);
Stephane Eraniana8d757e2011-08-25 15:58:03 +02001769 perf_event_task_sched_in(prev, current);
Nick Piggin4866cde2005-06-25 14:57:23 -07001770 finish_lock_switch(rq, prev);
Catalin Marinas01f23e12011-11-27 21:43:10 +00001771 finish_arch_post_lock_switch();
Steven Rostedte8fa1362008-01-25 21:08:05 +01001772
Avi Kivitye107be32007-07-26 13:40:43 +02001773 fire_sched_in_preempt_notifiers(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 if (mm)
1775 mmdrop(mm);
Oleg Nesterovc394cc92006-09-29 02:01:11 -07001776 if (unlikely(prev_state == TASK_DEAD)) {
bibo maoc6fd91f2006-03-26 01:38:20 -08001777 /*
1778 * Remove function-return probe instances associated with this
1779 * task and put them back on the free list.
Ingo Molnar9761eea2007-07-09 18:52:00 +02001780 */
bibo maoc6fd91f2006-03-26 01:38:20 -08001781 kprobe_flush_task(prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 put_task_struct(prev);
bibo maoc6fd91f2006-03-26 01:38:20 -08001783 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784}
1785
Gregory Haskins3f029d32009-07-29 11:08:47 -04001786#ifdef CONFIG_SMP
1787
1788/* assumes rq->lock is held */
1789static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1790{
1791 if (prev->sched_class->pre_schedule)
1792 prev->sched_class->pre_schedule(rq, prev);
1793}
1794
1795/* rq->lock is NOT held, but preemption is disabled */
1796static inline void post_schedule(struct rq *rq)
1797{
1798 if (rq->post_schedule) {
1799 unsigned long flags;
1800
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001801 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04001802 if (rq->curr->sched_class->post_schedule)
1803 rq->curr->sched_class->post_schedule(rq);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01001804 raw_spin_unlock_irqrestore(&rq->lock, flags);
Gregory Haskins3f029d32009-07-29 11:08:47 -04001805
1806 rq->post_schedule = 0;
1807 }
1808}
1809
1810#else
1811
1812static inline void pre_schedule(struct rq *rq, struct task_struct *p)
1813{
1814}
1815
1816static inline void post_schedule(struct rq *rq)
1817{
1818}
1819
1820#endif
1821
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822/**
1823 * schedule_tail - first thing a freshly forked thread must call.
1824 * @prev: the thread we just switched away from.
1825 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07001826asmlinkage void schedule_tail(struct task_struct *prev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 __releases(rq->lock)
1828{
Ingo Molnar70b97a72006-07-03 00:25:42 -07001829 struct rq *rq = this_rq();
1830
Nick Piggin4866cde2005-06-25 14:57:23 -07001831 finish_task_switch(rq, prev);
Steven Rostedtda19ab52009-07-29 00:21:22 -04001832
Gregory Haskins3f029d32009-07-29 11:08:47 -04001833 /*
1834 * FIXME: do we need to worry about rq being invalidated by the
1835 * task_switch?
1836 */
1837 post_schedule(rq);
Steven Rostedtda19ab52009-07-29 00:21:22 -04001838
Nick Piggin4866cde2005-06-25 14:57:23 -07001839#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1840 /* In this case, finish_task_switch does not reenable preemption */
1841 preempt_enable();
1842#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 if (current->set_child_tid)
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001844 put_user(task_pid_vnr(current), current->set_child_tid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845}
1846
1847/*
1848 * context_switch - switch to the new MM and the new
1849 * thread's register state.
1850 */
Ingo Molnardd41f592007-07-09 18:51:59 +02001851static inline void
Ingo Molnar70b97a72006-07-03 00:25:42 -07001852context_switch(struct rq *rq, struct task_struct *prev,
Ingo Molnar36c8b582006-07-03 00:25:41 -07001853 struct task_struct *next)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854{
Ingo Molnardd41f592007-07-09 18:51:59 +02001855 struct mm_struct *mm, *oldmm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
Avi Kivitye107be32007-07-26 13:40:43 +02001857 prepare_task_switch(rq, prev, next);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001858
Ingo Molnardd41f592007-07-09 18:51:59 +02001859 mm = next->mm;
1860 oldmm = prev->active_mm;
Zachary Amsden9226d122007-02-13 13:26:21 +01001861 /*
1862 * For paravirt, this is coupled with an exit in switch_to to
1863 * combine the page table reload and the switch backend into
1864 * one hypercall.
1865 */
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -08001866 arch_start_context_switch(prev);
Zachary Amsden9226d122007-02-13 13:26:21 +01001867
Heiko Carstens31915ab2010-09-16 14:42:25 +02001868 if (!mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 next->active_mm = oldmm;
1870 atomic_inc(&oldmm->mm_count);
1871 enter_lazy_tlb(oldmm, next);
1872 } else
1873 switch_mm(oldmm, mm, next);
1874
Heiko Carstens31915ab2010-09-16 14:42:25 +02001875 if (!prev->mm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 prev->active_mm = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 rq->prev_mm = oldmm;
1878 }
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07001879 /*
1880 * The runqueue lock will be released by the next
1881 * task (which is an invalid locking op but in the case
1882 * of the scheduler it's an obvious special-case), so we
1883 * do an early lockdep release here:
1884 */
1885#ifndef __ARCH_WANT_UNLOCKED_CTXSW
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07001886 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Ingo Molnar3a5f5e42006-07-14 00:24:27 -07001887#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
1889 /* Here we just switch the register state and the stack. */
1890 switch_to(prev, next, prev);
1891
Ingo Molnardd41f592007-07-09 18:51:59 +02001892 barrier();
1893 /*
1894 * this_rq must be evaluated again because prev may have moved
1895 * CPUs since it called schedule(), thus the 'rq' on its stack
1896 * frame will be invalid.
1897 */
1898 finish_task_switch(this_rq(), prev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899}
1900
1901/*
1902 * nr_running, nr_uninterruptible and nr_context_switches:
1903 *
1904 * externally visible scheduler statistics: current number of runnable
1905 * threads, current number of uninterruptible-sleeping threads, total
1906 * number of context switches performed since bootup.
1907 */
1908unsigned long nr_running(void)
1909{
1910 unsigned long i, sum = 0;
1911
1912 for_each_online_cpu(i)
1913 sum += cpu_rq(i)->nr_running;
1914
1915 return sum;
1916}
1917
1918unsigned long nr_uninterruptible(void)
1919{
1920 unsigned long i, sum = 0;
1921
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001922 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 sum += cpu_rq(i)->nr_uninterruptible;
1924
1925 /*
1926 * Since we read the counters lockless, it might be slightly
1927 * inaccurate. Do not allow it to go below zero though:
1928 */
1929 if (unlikely((long)sum < 0))
1930 sum = 0;
1931
1932 return sum;
1933}
1934
1935unsigned long long nr_context_switches(void)
1936{
Steven Rostedtcc94abf2006-06-27 02:54:31 -07001937 int i;
1938 unsigned long long sum = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001940 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 sum += cpu_rq(i)->nr_switches;
1942
1943 return sum;
1944}
1945
1946unsigned long nr_iowait(void)
1947{
1948 unsigned long i, sum = 0;
1949
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08001950 for_each_possible_cpu(i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1952
1953 return sum;
1954}
1955
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02001956unsigned long nr_iowait_cpu(int cpu)
Arjan van de Ven69d25872009-09-21 17:04:08 -07001957{
Peter Zijlstra8c215bd2010-07-01 09:07:17 +02001958 struct rq *this = cpu_rq(cpu);
Arjan van de Ven69d25872009-09-21 17:04:08 -07001959 return atomic_read(&this->nr_iowait);
1960}
1961
1962unsigned long this_cpu_load(void)
1963{
1964 struct rq *this = this_rq();
1965 return this->cpu_load[0];
1966}
1967
1968
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02001969/*
1970 * Global load-average calculations
1971 *
1972 * We take a distributed and async approach to calculating the global load-avg
1973 * in order to minimize overhead.
1974 *
1975 * The global load average is an exponentially decaying average of nr_running +
1976 * nr_uninterruptible.
1977 *
1978 * Once every LOAD_FREQ:
1979 *
1980 * nr_active = 0;
1981 * for_each_possible_cpu(cpu)
1982 * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
1983 *
1984 * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
1985 *
1986 * For a number of reasons the above turns into the mess below:
1987 *
1988 * - for_each_possible_cpu() is prohibitively expensive on machines with
1989 * a serious number of cpus, therefore we need to take a distributed approach
1990 * to calculating nr_active.
1991 *
1992 * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
1993 * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
1994 *
1995 * So assuming nr_active := 0 when we start out -- true by definition, we
1996 * can simply take per-cpu deltas and fold those into a global accumulator
1997 * to obtain the same result. See calc_load_fold_active().
1998 *
1999 * Furthermore, in order to avoid synchronizing all per-cpu delta folding
2000 * across the machine, we assume 10 ticks is sufficient time for every
2001 * cpu to have completed this task.
2002 *
2003 * This places an upper bound on the IRQ-off latency of the machine. Then
2004 * again, being late doesn't lose the delta, it just wrecks the sample.
2005 *
2006 * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
2007 * this would add another cross-cpu cacheline miss and atomic operation
2008 * to the wakeup path. Instead we increment on whatever cpu the task ran
2009 * when it went into uninterruptible state and decrement on whatever cpu
2010 * did the wakeup. This means that only the sum of nr_uninterruptible over
2011 * all cpus yields the correct result.
2012 *
2013 * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
2014 */
2015
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002016/* Variables and functions for calc_load */
2017static atomic_long_t calc_load_tasks;
2018static unsigned long calc_load_update;
2019unsigned long avenrun[3];
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002020EXPORT_SYMBOL(avenrun); /* should be removed */
2021
2022/**
2023 * get_avenrun - get the load average array
2024 * @loads: pointer to dest load array
2025 * @offset: offset to add
2026 * @shift: shift count to shift the result left
2027 *
2028 * These values are estimates at best, so no need for locking.
2029 */
2030void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2031{
2032 loads[0] = (avenrun[0] + offset) << shift;
2033 loads[1] = (avenrun[1] + offset) << shift;
2034 loads[2] = (avenrun[2] + offset) << shift;
2035}
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002036
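/*
 * Illustrative sketch (not part of this file): how a consumer such as
 * fs/proc/loadavg.c turns the fixed-point avenrun[] values into the
 * familiar "0.61 0.55 0.43" form, using the LOAD_INT()/LOAD_FRAC()
 * helpers from <linux/sched.h>. The FIXED_1/200 offset rounds to the
 * nearest hundredth before the integer/fraction split:
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	// LOAD_INT(x)  := x >> FSHIFT           -- integer part
 *	// LOAD_FRAC(x) := ((x & (FIXED_1-1)) * 100) >> FSHIFT
 *	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu\n",
 *		   LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 *		   LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 *		   LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
 */
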
Peter Zijlstra74f51872010-04-22 21:50:19 +02002037static long calc_load_fold_active(struct rq *this_rq)
2038{
2039 long nr_active, delta = 0;
2040
2041 nr_active = this_rq->nr_running;
2042 nr_active += (long) this_rq->nr_uninterruptible;
2043
2044 if (nr_active != this_rq->calc_load_active) {
2045 delta = nr_active - this_rq->calc_load_active;
2046 this_rq->calc_load_active = nr_active;
2047 }
2048
2049 return delta;
2050}
2051
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002052/*
2053 * a1 = a0 * e + a * (1 - e)
2054 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002055static unsigned long
2056calc_load(unsigned long load, unsigned long exp, unsigned long active)
2057{
2058 load *= exp;
2059 load += active * (FIXED_1 - exp);
2060 load += 1UL << (FSHIFT - 1);
2061 return load >> FSHIFT;
2062}
2063
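/*
 * Worked example for calc_load(), with the FSHIFT == 11 constants from
 * <linux/sched.h> (FIXED_1 == 2048, EXP_1 == 1884): starting from
 * avenrun[0] == 0 with two runnable tasks, active == 2 * FIXED_1 == 4096,
 * so one LOAD_FREQ step gives
 *
 *	calc_load(0, 1884, 4096)
 *		= (0 * 1884 + 4096 * (2048 - 1884) + 1024) >> 11
 *		= 672768 >> 11
 *		= 328				// 328/2048 ~= 0.16
 *
 * i.e. the 1-minute average creeps toward 2.0 at the e^(-5s/60s) decay
 * rate instead of jumping there.
 */
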
Peter Zijlstra74f51872010-04-22 21:50:19 +02002064#ifdef CONFIG_NO_HZ
2065/*
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002066 * Handle NO_HZ for the global load-average.
2067 *
2068 * Since the above described distributed algorithm to compute the global
2069 * load-average relies on per-cpu sampling from the tick, it is affected by
2070 * NO_HZ.
2071 *
2072 * The basic idea is to fold the nr_active delta into a global idle-delta upon
2073 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
2074 * when we read the global state.
2075 *
2076 * Obviously reality has to ruin such a delightfully simple scheme:
2077 *
2078 * - When we go NO_HZ idle during the window, we can negate our sample
2079 * contribution, causing under-accounting.
2080 *
2081 * We avoid this by keeping two idle-delta counters and flipping them
2082 * when the window starts, thus separating old and new NO_HZ load.
2083 *
2084 * The only trick is the slight shift in index flip for read vs write.
2085 *
2086 * 0s 5s 10s 15s
2087 * +10 +10 +10 +10
2088 * |-|-----------|-|-----------|-|-----------|-|
2089 * r:0 0 1 1 0 0 1 1 0
2090 * w:0 1 1 0 0 1 1 0 0
2091 *
2092 * This ensures we'll fold the old idle contribution in this window while
2093 * accumulating the new one.
2094 *
2095 * - When we wake up from NO_HZ idle during the window, we push up our
2096 * contribution, since we effectively move our sample point to a known
2097 * busy state.
2098 *
2099 * This is solved by pushing the window forward, and thus skipping the
2100 * sample, for this cpu (effectively using the idle-delta for this cpu which
2101 * was in effect at the time the window opened). This also solves the issue
2102 * of having to deal with a cpu having been in NOHZ idle for multiple
2103 * LOAD_FREQ intervals.
Peter Zijlstra74f51872010-04-22 21:50:19 +02002104 *
2105 * When making the ILB scale, we should try to pull this in as well.
2106 */
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002107static atomic_long_t calc_load_idle[2];
2108static int calc_load_idx;
Peter Zijlstra74f51872010-04-22 21:50:19 +02002109
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002110static inline int calc_load_write_idx(void)
Peter Zijlstra74f51872010-04-22 21:50:19 +02002111{
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002112 int idx = calc_load_idx;
2113
2114 /*
2115 * See calc_global_nohz(); if we observe the new index, we also
2116 * need to observe the new update time.
2117 */
2118 smp_rmb();
2119
2120 /*
2121 * If the folding window started, make sure we start writing in the
2122 * next idle-delta.
2123 */
2124 if (!time_before(jiffies, calc_load_update))
2125 idx++;
2126
2127 return idx & 1;
2128}
2129
2130static inline int calc_load_read_idx(void)
2131{
2132 return calc_load_idx & 1;
2133}
2134
2135void calc_load_enter_idle(void)
2136{
2137 struct rq *this_rq = this_rq();
Peter Zijlstra74f51872010-04-22 21:50:19 +02002138 long delta;
2139
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002140 /*
2141 * We're going into NOHZ mode; if there's any pending delta, fold it
2142 * into the pending idle delta.
2143 */
Peter Zijlstra74f51872010-04-22 21:50:19 +02002144 delta = calc_load_fold_active(this_rq);
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002145 if (delta) {
2146 int idx = calc_load_write_idx();
2147 atomic_long_add(delta, &calc_load_idle[idx]);
2148 }
2149}
2150
2151void calc_load_exit_idle(void)
2152{
2153 struct rq *this_rq = this_rq();
2154
2155 /*
2156 * If we're still before the sample window, we're done.
2157 */
2158 if (time_before(jiffies, this_rq->calc_load_update))
2159 return;
2160
2161 /*
2162 * We woke inside or after the sample window; this means we're already
2163 * accounted through the nohz accounting, so skip the entire deal and
2164 * sync up for the next window.
2165 */
2166 this_rq->calc_load_update = calc_load_update;
2167 if (time_before(jiffies, this_rq->calc_load_update + 10))
2168 this_rq->calc_load_update += LOAD_FREQ;
Peter Zijlstra74f51872010-04-22 21:50:19 +02002169}
2170
2171static long calc_load_fold_idle(void)
2172{
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002173 int idx = calc_load_read_idx();
Peter Zijlstra74f51872010-04-22 21:50:19 +02002174 long delta = 0;
2175
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002176 if (atomic_long_read(&calc_load_idle[idx]))
2177 delta = atomic_long_xchg(&calc_load_idle[idx], 0);
Peter Zijlstra74f51872010-04-22 21:50:19 +02002178
2179 return delta;
2180}
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002181
2182/**
2183 * fixed_power_int - compute: x^n, in O(log n) time
2184 *
2185 * @x: base of the power
2186 * @frac_bits: fractional bits of @x
2187 * @n: power to raise @x to.
2188 *
2189 * By exploiting the relation between the definition of the natural power
2190 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2191 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2192 * (where: n_i \elem {0, 1}, the binary vector representing n),
2193 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2194 * of course trivially computable in O(log_2 n), the length of our binary
2195 * vector.
2196 */
2197static unsigned long
2198fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2199{
2200 unsigned long result = 1UL << frac_bits;
2201
2202 if (n) for (;;) {
2203 if (n & 1) {
2204 result *= x;
2205 result += 1UL << (frac_bits - 1);
2206 result >>= frac_bits;
2207 }
2208 n >>= 1;
2209 if (!n)
2210 break;
2211 x *= x;
2212 x += 1UL << (frac_bits - 1);
2213 x >>= frac_bits;
2214 }
2215
2216 return result;
2217}
2218
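/*
 * Worked example for fixed_power_int(): 0.5^3 with frac_bits == 11, i.e.
 * x == 1024 and n == 3 (binary 11, so both loop iterations fold into the
 * result):
 *
 *	result = (2048 * 1024 + 1024) >> 11 = 1024	// bit 0: 0.5
 *	x      = (1024 * 1024 + 1024) >> 11 = 512	// square: 0.25
 *	result = (1024 *  512 + 1024) >> 11 = 256	// bit 1: 0.125
 *
 * 256/2048 == 0.125 == 0.5^3. For the small n above this saves nothing,
 * but calc_load_n() can pass in hundreds of missed intervals, where
 * O(log n) multiplies beat n of them handily.
 */
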
2219/*
2220 * a1 = a0 * e + a * (1 - e)
2221 *
2222 * a2 = a1 * e + a * (1 - e)
2223 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2224 * = a0 * e^2 + a * (1 - e) * (1 + e)
2225 *
2226 * a3 = a2 * e + a * (1 - e)
2227 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2228 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2229 *
2230 * ...
2231 *
2232 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2233 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2234 * = a0 * e^n + a * (1 - e^n)
2235 *
2236 * [1] application of the geometric series:
2237 *
2238 * n 1 - x^(n+1)
2239 * S_n := \Sum x^i = -------------
2240 * i=0 1 - x
2241 */
2242static unsigned long
2243calc_load_n(unsigned long load, unsigned long exp,
2244 unsigned long active, unsigned int n)
2245{
2246
2247 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2248}
2249
2250/*
2251 * NO_HZ can leave us missing all per-cpu ticks calling
2252 * calc_load_account_active(), but since an idle CPU folds its delta into
2253 * calc_load_idle[] per calc_load_enter_idle(), all we need to do is fold
2254 * in the pending idle delta if our idle period crossed a load cycle boundary.
2255 *
2256 * Once we've updated the global active value, we need to apply the exponential
2257 * weights adjusted to the number of cycles missed.
2258 */
Peter Zijlstrac308b562012-03-01 15:04:46 +01002259static void calc_global_nohz(void)
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002260{
2261 long delta, active, n;
2262
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002263 if (!time_before(jiffies, calc_load_update + 10)) {
2264 /*
2265 * Catch-up, fold however many we are behind still
2266 */
2267 delta = jiffies - calc_load_update - 10;
2268 n = 1 + (delta / LOAD_FREQ);
2269
2270 active = atomic_long_read(&calc_load_tasks);
2271 active = active > 0 ? active * FIXED_1 : 0;
2272
2273 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2274 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2275 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
2276
2277 calc_load_update += n * LOAD_FREQ;
2278 }
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002279
2280 /*
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002281 * Flip the idle index...
2282 *
2283 * Make sure we first write the new time then flip the index, so that
2284 * calc_load_write_idx() will see the new time when it reads the new
2285 * index; this avoids a double flip messing things up.
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002286 */
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002287 smp_wmb();
2288 calc_load_idx++;
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002289}
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002290#else /* !CONFIG_NO_HZ */
Peter Zijlstra74f51872010-04-22 21:50:19 +02002291
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002292static inline long calc_load_fold_idle(void) { return 0; }
2293static inline void calc_global_nohz(void) { }
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002294
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002295#endif /* CONFIG_NO_HZ */
Thomas Gleixner2d024942009-05-02 20:08:52 +02002296
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002297/*
2298 * calc_global_load - update the avenrun load estimates 10 ticks after the
2299 * CPUs have updated calc_load_tasks.
2300 */
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002301void calc_global_load(unsigned long ticks)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002302{
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002303 long active, delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002304
Peter Zijlstra0f004f52010-11-30 19:48:45 +01002305 if (time_before(jiffies, calc_load_update + 10))
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002306 return;
2307
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002308 /*
2309 * Fold the 'old' idle-delta to include all NO_HZ cpus.
2310 */
2311 delta = calc_load_fold_idle();
2312 if (delta)
2313 atomic_long_add(delta, &calc_load_tasks);
2314
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002315 active = atomic_long_read(&calc_load_tasks);
2316 active = active > 0 ? active * FIXED_1 : 0;
2317
2318 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2319 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2320 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
2321
2322 calc_load_update += LOAD_FREQ;
Peter Zijlstrac308b562012-03-01 15:04:46 +01002323
2324 /*
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002325 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
Peter Zijlstrac308b562012-03-01 15:04:46 +01002326 */
2327 calc_global_nohz();
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002328}
2329
2330/*
Peter Zijlstra74f51872010-04-22 21:50:19 +02002331 * Called from update_cpu_load() to periodically update this CPU's
2332 * active count.
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002333 */
2334static void calc_load_account_active(struct rq *this_rq)
2335{
Peter Zijlstra74f51872010-04-22 21:50:19 +02002336 long delta;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002337
Peter Zijlstra74f51872010-04-22 21:50:19 +02002338 if (time_before(jiffies, this_rq->calc_load_update))
2339 return;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002340
Peter Zijlstra74f51872010-04-22 21:50:19 +02002341 delta = calc_load_fold_active(this_rq);
Peter Zijlstra74f51872010-04-22 21:50:19 +02002342 if (delta)
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002343 atomic_long_add(delta, &calc_load_tasks);
Peter Zijlstra74f51872010-04-22 21:50:19 +02002344
2345 this_rq->calc_load_update += LOAD_FREQ;
Jack Steinerdb1b1fe2006-03-31 02:31:21 -08002346}
2347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348/*
Peter Zijlstra5167e8d2012-06-22 15:52:09 +02002349 * End of global load-average stuff
2350 */
2351
2352/*
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002353 * The exact cpuload at various idx values, calculated at every tick, would be:
2354 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2355 *
2356 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
2357 * on nth tick when cpu may be busy, then we have:
2358 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2359 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2360 *
2361 * decay_load_missed() below does efficient calculation of
2362 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2363 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2364 *
2365 * The calculation is approximated on a 128 point scale.
2366 * degrade_zero_ticks is the number of ticks after which load at any
2367 * particular idx is approximated to be zero.
2368 * degrade_factor is a precomputed table, a row for each load idx.
2369 * Each column corresponds to degradation factor for a power of two ticks,
2370 * based on 128 point scale.
2371 * Example:
2372 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2373 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2374 *
2375 * With this power of 2 load factors, we can degrade the load n times
2376 * by looking at 1 bits in n and doing as many mult/shift instead of
2377 * n mult/shifts needed by the exact degradation.
2378 */
2379#define DEGRADE_SHIFT 7
2380static const unsigned char
2381 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2382static const unsigned char
2383 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2384 {0, 0, 0, 0, 0, 0, 0, 0},
2385 {64, 32, 8, 0, 0, 0, 0, 0},
2386					{96, 72, 40, 12, 1, 0, 0, 0},
2387					{112, 98, 75, 43, 15, 1, 0, 0},
2388					{120, 112, 98, 76, 45, 16, 2, 0} };
2389
2390/*
2391 * Update cpu_load for any missed ticks due to tickless idle. The backlog
2392 * accumulates while the CPU is idle, so we just decay the old load without
2393 * adding any new load.
2394 */
2395static unsigned long
2396decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2397{
2398 int j = 0;
2399
2400 if (!missed_updates)
2401 return load;
2402
2403 if (missed_updates >= degrade_zero_ticks[idx])
2404 return 0;
2405
2406 if (idx == 1)
2407 return load >> missed_updates;
2408
2409 while (missed_updates) {
2410 if (missed_updates % 2)
2411 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2412
2413 missed_updates >>= 1;
2414 j++;
2415 }
2416 return load;
2417}
2418
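/*
 * Worked example for decay_load_missed(): a cpu at load idx 2 that missed
 * 10 ticks. 10 is binary 1010, so the loop applies column 1 (2 ticks,
 * factor 72/128) and column 3 (8 ticks, factor 12/128) of row 2:
 *
 *	load = (load * 72) >> DEGRADE_SHIFT;	// (3/4)^2 == 72/128 exactly
 *	load = (load * 12) >> DEGRADE_SHIFT;	// (3/4)^8 ~= 12/128
 *
 * Two mult/shift pairs instead of the ten the exact per-tick decay would
 * take.
 */
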
2419/*
Ingo Molnardd41f592007-07-09 18:51:59 +02002420 * Update rq->cpu_load[] statistics. This function is usually called every
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002421 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2422 * every tick. We fix it up based on jiffies.
Ingo Molnar48f24c42006-07-03 00:25:40 -07002423 */
Peter Zijlstra556061b2012-05-11 17:31:26 +02002424static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2425 unsigned long pending_updates)
Ingo Molnar48f24c42006-07-03 00:25:40 -07002426{
Ingo Molnardd41f592007-07-09 18:51:59 +02002427 int i, scale;
2428
2429 this_rq->nr_load_updates++;
Ingo Molnardd41f592007-07-09 18:51:59 +02002430
2431 /* Update our load: */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002432 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2433 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
Ingo Molnardd41f592007-07-09 18:51:59 +02002434 unsigned long old_load, new_load;
2435
2436 /* scale is effectively 1 << i now, and >> i divides by scale */
2437
2438 old_load = this_rq->cpu_load[i];
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002439 old_load = decay_load_missed(old_load, pending_updates - 1, i);
Ingo Molnardd41f592007-07-09 18:51:59 +02002440 new_load = this_load;
Ingo Molnara25707f2007-10-15 17:00:03 +02002441 /*
2442 * Round up the averaging division if load is increasing. This
2443 * prevents us from getting stuck on 9 if the load is 10, for
2444 * example.
2445 */
2446 if (new_load > old_load)
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002447 new_load += scale - 1;
2448
2449 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
Ingo Molnardd41f592007-07-09 18:51:59 +02002450 }
Suresh Siddhada2b71e2010-08-23 13:42:51 -07002451
2452 sched_avg_update(this_rq);
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002453}
2454
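/*
 * Worked example for __update_cpu_load() at idx 2 (scale == 4): each tick
 * computes cpu_load[2] = (old * 3 + new) >> 2. With a steady this_load of
 * 2048 and an initial load of 0, successive ticks yield 512, 896, 1184,
 * ... converging on 2048. Higher idx means slower convergence, giving the
 * load balancer a choice of smoothing horizons over the same input.
 */
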
Peter Zijlstra5aaa0b72012-05-17 17:15:29 +02002455#ifdef CONFIG_NO_HZ
2456/*
2457 * There is no sane way to deal with nohz on smp when using jiffies because the
2458 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
2459 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
2460 *
2461 * Therefore we cannot use the delta approach from the regular tick since that
2462 * would seriously skew the load calculation. However we'll make do for those
2463 * updates happening while idle (nohz_idle_balance) or coming out of idle
2464 * (tick_nohz_idle_exit).
2465 *
2466 * This means we might still be one tick off for nohz periods.
2467 */
2468
Peter Zijlstra556061b2012-05-11 17:31:26 +02002469/*
2470 * Called from nohz_idle_balance() to update the load ratings before doing the
2471 * idle balance.
2472 */
2473void update_idle_cpu_load(struct rq *this_rq)
2474{
Peter Zijlstra5aaa0b72012-05-17 17:15:29 +02002475 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
Peter Zijlstra556061b2012-05-11 17:31:26 +02002476 unsigned long load = this_rq->load.weight;
2477 unsigned long pending_updates;
2478
2479 /*
Peter Zijlstra5aaa0b72012-05-17 17:15:29 +02002480 * bail if there's load or we're actually up-to-date.
Peter Zijlstra556061b2012-05-11 17:31:26 +02002481 */
2482 if (load || curr_jiffies == this_rq->last_load_update_tick)
2483 return;
2484
2485 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2486 this_rq->last_load_update_tick = curr_jiffies;
2487
2488 __update_cpu_load(this_rq, load, pending_updates);
2489}
2490
2491/*
Peter Zijlstra5aaa0b72012-05-17 17:15:29 +02002492 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
2493 */
2494void update_cpu_load_nohz(void)
2495{
2496 struct rq *this_rq = this_rq();
2497 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2498 unsigned long pending_updates;
2499
2500 if (curr_jiffies == this_rq->last_load_update_tick)
2501 return;
2502
2503 raw_spin_lock(&this_rq->lock);
2504 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2505 if (pending_updates) {
2506 this_rq->last_load_update_tick = curr_jiffies;
2507 /*
2508 * We were idle, this means load 0, the current load might be
2509 * !0 due to remote wakeups and the sort.
2510 */
2511 __update_cpu_load(this_rq, 0, pending_updates);
2512 }
2513 raw_spin_unlock(&this_rq->lock);
2514}
2515#endif /* CONFIG_NO_HZ */
2516
2517/*
Peter Zijlstra556061b2012-05-11 17:31:26 +02002518 * Called from scheduler_tick()
2519 */
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07002520static void update_cpu_load_active(struct rq *this_rq)
2521{
Peter Zijlstra556061b2012-05-11 17:31:26 +02002522 /*
Peter Zijlstra5aaa0b72012-05-17 17:15:29 +02002523 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
Peter Zijlstra556061b2012-05-11 17:31:26 +02002524 */
2525 this_rq->last_load_update_tick = jiffies;
2526 __update_cpu_load(this_rq, this_rq->load.weight, 1);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02002527
Peter Zijlstra74f51872010-04-22 21:50:19 +02002528 calc_load_account_active(this_rq);
Ingo Molnar48f24c42006-07-03 00:25:40 -07002529}
2530

#ifdef CONFIG_SMP

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

#endif

DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

/*
 * Return any ns on the sched_clock that have not yet been accounted in
 * @p, in case that task is currently running.
 *
 * Called with task_rq_lock() held on @rq.
 */
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
	u64 ns = 0;

	if (task_current(rq, p)) {
		update_rq_clock(rq);
		ns = rq->clock_task - p->se.exec_start;
		if ((s64)ns < 0)
			ns = 0;
	}

	return ns;
}

unsigned long long task_delta_exec(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that has not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns = 0;

	rq = task_rq_lock(p, &flags);
	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
	task_rq_unlock(rq, p, &flags);

	return ns;
}
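
/*
 * Illustrative sketch (the demo_* name is hypothetical, not part of the
 * scheduler proper): sampling how much CPU time a task consumes over
 * roughly one second using task_sched_runtime() above.
 */
static __maybe_unused u64 demo_runtime_delta(struct task_struct *p)
{
	u64 before, after;

	before = task_sched_runtime(p);
	schedule_timeout_uninterruptible(HZ);	/* sleep ~1s while @p runs */
	after = task_sched_runtime(p);

	return after - before;	/* ns of CPU time @p consumed meanwhile */
}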

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	update_cpu_load_active(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq, cpu);
#endif
}
2643
Lai Jiangshan132380a2009-04-02 14:18:25 +08002644notrace unsigned long get_parent_ip(unsigned long addr)
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002645{
2646 if (in_lock_functions(addr)) {
2647 addr = CALLER_ADDR2;
2648 if (in_lock_functions(addr))
2649 addr = CALLER_ADDR3;
2650 }
2651 return addr;
2652}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653
Steven Rostedt7e49fcc2009-01-22 19:01:40 -05002654#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2655 defined(CONFIG_PREEMPT_TRACER))
2656
Srinivasa Ds43627582008-02-23 15:24:04 -08002657void __kprobes add_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002659#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 /*
2661 * Underflow?
2662 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07002663 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2664 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002665#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 preempt_count() += val;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002667#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 /*
2669 * Spinlock count overflowing soon?
2670 */
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08002671 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2672 PREEMPT_MASK - 10);
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002673#endif
2674 if (preempt_count() == val)
2675 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676}
2677EXPORT_SYMBOL(add_preempt_count);
2678
Srinivasa Ds43627582008-02-23 15:24:04 -08002679void __kprobes sub_preempt_count(int val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680{
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002681#ifdef CONFIG_DEBUG_PREEMPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 /*
2683 * Underflow?
2684 */
Ingo Molnar01e3eb82009-01-12 13:00:50 +01002685 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07002686 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 /*
2688 * Is the spinlock portion underflowing?
2689 */
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07002690 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2691 !(preempt_count() & PREEMPT_MASK)))
2692 return;
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002693#endif
Ingo Molnar9a11b49a2006-07-03 00:24:33 -07002694
Steven Rostedt6cd8a4b2008-05-12 21:20:42 +02002695 if (preempt_count() == val)
2696 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 preempt_count() -= val;
2698}
2699EXPORT_SYMBOL(sub_preempt_count);
2700
2701#endif
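
/*
 * Illustrative sketch: the counting above is what lets the familiar
 * preempt_disable()/preempt_enable() pair nest. demo_nested_preempt_off()
 * is a hypothetical example, not used anywhere.
 */
static __maybe_unused void demo_nested_preempt_off(void)
{
	preempt_disable();	/* count 0 -> 1, trace_preempt_off() fires */
	preempt_disable();	/* count 1 -> 2, no trace event */

	WARN_ON(preemptible());	/* preemption stays off while count != 0 */

	preempt_enable();	/* count 2 -> 1 */
	preempt_enable();	/* count 1 -> 0, may reschedule */
}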

/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
	dump_stack();
	add_taint(TAINT_WARN);
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
		__schedule_bug(prev);
	rcu_sleep_check();

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	if (prev->on_rq || rq->skip_clock_update < 0)
		update_rq_clock(rq);
	prev->sched_class->put_prev_task(rq, prev);
}

/*
 * Pick up the highest-prio task:
 */
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq);
		if (likely(p))
			return p;
	}

	for_each_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	BUG(); /* the idle class will always have a runnable task */
}

/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in the
 *      timer interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outermost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return from syscall or exception to user-space
 *          - return from interrupt-handler to user-space
 */
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch has flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	sched_preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
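
/*
 * Illustrative sketch of entry path 1 above (explicit blocking): a
 * hypothetical demo_wait_for_cond() helper that sleeps on a waitqueue
 * until *@cond becomes true, entering __schedule() via schedule().
 */
static __maybe_unused void demo_wait_for_cond(wait_queue_head_t *wq, int *cond)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (*cond)
			break;
		schedule();	/* blocks until a wake_up() on @wq */
	}
	finish_wait(wq, &wait);
}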

static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state || tsk_is_pi_blocked(tsk))
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}
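
/*
 * Illustrative sketch: schedule_preempt_disabled() suits loops that run
 * with preemption disabled and yield at a single well-defined point,
 * idle-loop style. demo_should_run() is a hypothetical callback.
 */
static __maybe_unused void demo_preempt_off_loop(bool (*demo_should_run)(void))
{
	preempt_disable();
	while (demo_should_run())
		schedule_preempt_disabled();	/* returns with preemption off */
	preempt_enable();
}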

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to free()d memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	if (!sched_feat(OWNER_SPIN))
		return 0;

	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() or when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
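
/*
 * Illustrative sketch of a caller: a simplified optimistic-spin step a
 * lock slowpath could take using mutex_spin_on_owner() above. The real
 * logic lives in kernel/mutex.c; demo_mutex_opt_spin() is hypothetical.
 */
static __maybe_unused bool demo_mutex_opt_spin(struct mutex *lock)
{
	struct task_struct *owner = ACCESS_ONCE(lock->owner);

	if (owner && !mutex_spin_on_owner(lock, owner))
		return false;	/* owner went to sleep or we need to resched */

	/* lock->owner is (momentarily) NULL: one more trylock is worthwhile */
	return mutex_trylock(lock);
}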
#endif

#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemption off of the return-from-interrupt
 * path is handled by preempt_schedule_irq() below, which calls the
 * scheduler directly.
 */
asmlinkage void __sched notrace preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return.
	 */
	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	do {
		add_preempt_count_notrace(PREEMPT_ACTIVE);
		__schedule();
		sub_preempt_count_notrace(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);

/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note that this is called and returns with irqs disabled. This
 * protects us against recursive calls from irq context.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	struct thread_info *ti = current_thread_info();

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		__schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}

#endif /* CONFIG_PREEMPT */

int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
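
/*
 * Illustrative sketch of the classic waiter/waker pairing that ends up
 * in __wake_up(): a hypothetical flag guarded by a waitqueue. The
 * demo_* names are not real kernel symbols.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_flag;

static __maybe_unused int demo_waiter(void)
{
	/* sleeps until demo_flag != 0; interruptible by signals */
	return wait_event_interruptible(demo_wq, demo_flag != 0);
}

static __maybe_unused void demo_waker(void)
{
	demo_flag = 1;
	wake_up(&demo_wq);	/* expands to __wake_up(..., 1, NULL) */
}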

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
	__wake_up_common(q, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
	__wake_up_common(q, mode, 1, 0, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	int wake_flags = WF_SYNC;

	if (unlikely(!q))
		return;

	if (unlikely(!nr_exclusive))
		wake_flags = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

/**
 * complete: - signals a single thread waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);

/**
 * complete_all: - signals all threads waiting on this completion
 * @x: holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
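
/*
 * Illustrative sketch: the usual split between a waiter and a signaler
 * using the completion API above. demo_kick_and_wait() is hypothetical;
 * a real user would hand @done to an irq handler or worker instead.
 */
static __maybe_unused void demo_kick_and_wait(void (*demo_start)(struct completion *))
{
	DECLARE_COMPLETION_ONSTACK(done);

	demo_start(&done);		/* async side calls complete(&done) */
	wait_for_completion(&done);	/* sleep until that happens */
}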

static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			__set_current_state(state);
			spin_unlock_irq(&x->wait.lock);
			timeout = schedule_timeout(timeout);
			spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__remove_wait_queue(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	x->done--;
	return timeout ?: 1;
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, timeout, state);
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}

/**
 * wait_for_completion: - waits for completion of a task
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);

/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * The return value is 0 if timed out, and positive (at least 1, or number of
 * jiffies left till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
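
/*
 * Illustrative sketch: bounding the wait with a timeout. demo_wait_bounded()
 * is a hypothetical helper; the 100ms budget is arbitrary for the example.
 */
static __maybe_unused int demo_wait_bounded(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* timed out, completion not signaled */

	return 0;			/* completed with @left jiffies to spare */
}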

/**
 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
 * @x: holds the state of this particular completion
 *
 * This waits for completion of a specific task to be signaled. It is
 * interruptible.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_interruptible(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);

/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);

/**
 * wait_for_completion_killable: - waits for completion of a task (killable)
 * @x: holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It can be
 * interrupted by a kill signal.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_killable(struct completion *x)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);

/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x: holds the state of this particular completion
 * @timeout: timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
 * positive (at least 1, or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);

/**
 * try_wait_for_completion - try to decrement a completion without blocking
 * @x: completion structure
 *
 * Returns: 0 if a decrement cannot be done without blocking
 *	    1 if a decrement succeeded.
 *
 * If a completion is being used as a counting completion,
 * attempt to decrement the counter without blocking. This
 * enables us to avoid waiting if the resource the completion
 * is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	else
		x->done--;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
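
/*
 * Illustrative sketch: using try_wait_for_completion() as a non-blocking
 * "consume one unit" check, per the counting-completion use described
 * above. demo_try_consume() is a hypothetical name.
 */
static __maybe_unused bool demo_try_consume(struct completion *available)
{
	if (!try_wait_for_completion(available))
		return false;	/* nothing available, caller must not block */

	/* one unit of the resource was reserved (x->done decremented) */
	return true;
}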

/**
 * completion_done - Test to see if a completion has any waiters
 * @x: completion structure
 *
 * Returns: 0 if there are waiters (wait_for_completion() in progress)
 *	    1 if there are no waiters.
 *
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = 0;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(completion_done);

static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);

	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}

void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);

long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);

void __sched sleep_on(wait_queue_head_t *q)
{
	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);

long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);

#ifdef CONFIG_RT_MUTEXES

/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance logic.
 */
void rt_mutex_setprio(struct task_struct *p, int prio)
{
	int oldprio, on_rq, running;
	struct rq *rq;
	const struct sched_class *prev_class;

	BUG_ON(prio < 0 || prio > MAX_PRIO);

	rq = __task_rq_lock(p);

	/*
	 * Idle task boosting is a no-no in general. There is one
	 * exception, when PREEMPT_RT and NOHZ are active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * ignore the boosting request, as the idle CPU runs this code
	 * with interrupts disabled and will complete the lock
	 * protected section without being interrupted. So there is no
	 * real need to boost.
	 */
	if (unlikely(p == rq->idle)) {
		WARN_ON(p != rq->curr);
		WARN_ON(p->pi_blocked_on);
		goto out_unlock;
	}

	trace_sched_pi_setprio(p, prio);
	oldprio = p->prio;
	prev_class = p->sched_class;
	on_rq = p->on_rq;
	running = task_current(rq, p);
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	if (rt_prio(prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;

	p->prio = prio;

	if (running)
		p->sched_class->set_curr_task(rq);
	if (on_rq)
		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	__task_rq_unlock(rq);
}
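
/*
 * Illustrative sketch of the priority-inheritance idea rt_mutex_setprio()
 * serves: boost a lock owner to a more urgent waiter's priority and
 * restore it on unlock. demo_pi_boost()/demo_pi_restore() are
 * hypothetical; the real logic lives in kernel/rtmutex.c.
 */
static __maybe_unused void demo_pi_boost(struct task_struct *owner,
					 struct task_struct *waiter)
{
	if (waiter->prio < owner->prio)		/* lower value = more urgent */
		rt_mutex_setprio(owner, waiter->prio);
}

static __maybe_unused void demo_pi_restore(struct task_struct *owner)
{
	rt_mutex_setprio(owner, rt_mutex_getprio(owner));
}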
#endif

void set_user_nice(struct task_struct *p, long nice)
{
	int old_prio, delta, on_rq;
	unsigned long flags;
	struct rq *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling until the task is
	 * SCHED_FIFO/SCHED_RR:
	 */
	if (task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	on_rq = p->on_rq;
	if (on_rq)
		dequeue_task(rq, p, 0);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p);
	old_prio = p->prio;
	p->prio = effective_prio(p);
	delta = p->prio - old_prio;

	if (on_rq) {
		enqueue_task(rq, p, 0);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, p, &flags);
}
EXPORT_SYMBOL(set_user_nice);
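
/*
 * Illustrative sketch: a housekeeping thread demoting itself to the
 * lowest non-RT priority via set_user_nice() above. demo_drop_priority()
 * is a hypothetical helper.
 */
static __maybe_unused void demo_drop_priority(void)
{
	set_user_nice(current, 19);	/* nice 19: weakest non-RT weight */
	WARN_ON(task_nice(current) != 19);
}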

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
	int nice_rlim = 20 - nice;

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
		capable(CAP_SYS_NICE));
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < -40)
		increment = -40;
	if (increment > 40)
		increment = 40;

	nice = TASK_NICE(current) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;

	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(const struct task_struct *p)
{
	return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (!llist_empty(&rq->wake_list))
		return 0;
#endif

	return 1;
}

/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
	p->policy = policy;
	p->rt_priority = prio;
	p->normal_prio = normal_prio(p);
	/* we are holding p->pi_lock already */
	p->prio = rt_mutex_getprio(p);
	if (rt_prio(p->prio))
		p->sched_class = &rt_sched_class;
	else
		p->sched_class = &fair_sched_class;
	set_load_weight(p);
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
	match = (uid_eq(cred->euid, pcred->euid) ||
		 uid_eq(cred->euid, pcred->uid));
	rcu_read_unlock();
	return match;
}
3688
Rusty Russell961ccdd2008-06-23 13:55:38 +10003689static int __sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07003690 const struct sched_param *param, bool user)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003692 int retval, oldprio, oldpolicy = -1, on_rq, running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 unsigned long flags;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01003694 const struct sched_class *prev_class;
Ingo Molnar70b97a72006-07-03 00:25:42 -07003695 struct rq *rq;
Lennart Poetteringca94c442009-06-15 17:17:47 +02003696 int reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697
Steven Rostedt66e53932006-06-27 02:54:44 -07003698 /* may grab non-irq protected spin_locks */
3699 BUG_ON(in_interrupt());
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700recheck:
3701 /* double check policy once rq lock held */
Lennart Poetteringca94c442009-06-15 17:17:47 +02003702 if (policy < 0) {
3703 reset_on_fork = p->sched_reset_on_fork;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704 policy = oldpolicy = p->policy;
Lennart Poetteringca94c442009-06-15 17:17:47 +02003705 } else {
3706 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3707 policy &= ~SCHED_RESET_ON_FORK;
3708
3709 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3710 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3711 policy != SCHED_IDLE)
3712 return -EINVAL;
3713 }
3714
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715 /*
3716 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnardd41f592007-07-09 18:51:59 +02003717 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3718 * SCHED_BATCH and SCHED_IDLE is 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 */
3720 if (param->sched_priority < 0 ||
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07003721 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
Steven Rostedtd46523e2005-07-25 16:28:39 -04003722 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 return -EINVAL;
Ingo Molnare05606d2007-07-09 18:51:59 +02003724 if (rt_policy(policy) != (param->sched_priority != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 return -EINVAL;
3726
Olivier Croquette37e4ab32005-06-25 14:57:32 -07003727 /*
3728 * Allow unprivileged RT tasks to decrease priority:
3729 */
Rusty Russell961ccdd2008-06-23 13:55:38 +10003730 if (user && !capable(CAP_SYS_NICE)) {
Ingo Molnare05606d2007-07-09 18:51:59 +02003731 if (rt_policy(policy)) {
Oleg Nesterova44702e2010-06-11 01:09:44 +02003732 unsigned long rlim_rtprio =
3733 task_rlimit(p, RLIMIT_RTPRIO);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07003734
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07003735 /* can't set/change the rt policy */
3736 if (policy != p->policy && !rlim_rtprio)
3737 return -EPERM;
3738
3739 /* can't increase priority */
3740 if (param->sched_priority > p->rt_priority &&
3741 param->sched_priority > rlim_rtprio)
3742 return -EPERM;
3743 }
Darren Hartc02aa732011-02-17 15:37:07 -08003744
Ingo Molnardd41f592007-07-09 18:51:59 +02003745 /*
Darren Hartc02aa732011-02-17 15:37:07 -08003746 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3747 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
Ingo Molnardd41f592007-07-09 18:51:59 +02003748 */
Darren Hartc02aa732011-02-17 15:37:07 -08003749 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
3750 if (!can_nice(p, TASK_NICE(p)))
3751 return -EPERM;
3752 }
Oleg Nesterov8dc3e902006-09-29 02:00:50 -07003753
Olivier Croquette37e4ab32005-06-25 14:57:32 -07003754 /* can't change other user's priorities */
David Howellsc69e8d92008-11-14 10:39:19 +11003755 if (!check_same_owner(p))
Olivier Croquette37e4ab32005-06-25 14:57:32 -07003756 return -EPERM;
Lennart Poetteringca94c442009-06-15 17:17:47 +02003757
3758 /* Normal users shall not reset the sched_reset_on_fork flag */
3759 if (p->sched_reset_on_fork && !reset_on_fork)
3760 return -EPERM;
Olivier Croquette37e4ab32005-06-25 14:57:32 -07003761 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07003763 if (user) {
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09003764 retval = security_task_setscheduler(p);
Jeremy Fitzhardinge725aad22008-08-03 09:33:03 -07003765 if (retval)
3766 return retval;
3767 }
3768
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 /*
Ingo Molnarb29739f2006-06-27 02:54:51 -07003770 * make sure no PI-waiters arrive (or leave) while we are
3771 * changing the priority of the task:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003772 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003773 * To be able to change p->policy safely, the appropriate
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774 * runqueue lock must be held.
3775 */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003776 rq = task_rq_lock(p, &flags);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02003777
Peter Zijlstra34f971f2010-09-22 13:53:15 +02003778 /*
3779 * Changing the policy of the stop thread is a very bad idea
3780 */
3781 if (p == rq->stop) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003782 task_rq_unlock(rq, p, &flags);
Peter Zijlstra34f971f2010-09-22 13:53:15 +02003783 return -EINVAL;
3784 }
3785
Dario Faggiolia51e9192011-03-24 14:00:18 +01003786 /*
3787 * If not changing anything there's no need to proceed further:
3788 */
3789 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
3790 param->sched_priority == p->rt_priority))) {
Namhyung Kim45afb172012-07-07 16:49:02 +09003791 task_rq_unlock(rq, p, &flags);
Dario Faggiolia51e9192011-03-24 14:00:18 +01003792 return 0;
3793 }
3794
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02003795#ifdef CONFIG_RT_GROUP_SCHED
3796 if (user) {
3797 /*
3798 * Do not allow realtime tasks into groups that have no runtime
3799 * assigned.
3800 */
3801 if (rt_bandwidth_enabled() && rt_policy(policy) &&
Mike Galbraithf4493772011-01-13 04:54:50 +01003802 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3803 !task_group_is_autogroup(task_group(p))) {
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003804 task_rq_unlock(rq, p, &flags);
Peter Zijlstradc61b1d2010-06-08 11:40:42 +02003805 return -EPERM;
3806 }
3807 }
3808#endif
3809
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 /* recheck policy now with rq lock held */
3811 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3812 policy = oldpolicy = -1;
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003813 task_rq_unlock(rq, p, &flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 goto recheck;
3815 }
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02003816 on_rq = p->on_rq;
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01003817 running = task_current(rq, p);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07003818 if (on_rq)
Peter Zijlstra4ca9b722012-01-25 11:50:51 +01003819 dequeue_task(rq, p, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07003820 if (running)
3821 p->sched_class->put_prev_task(rq, p);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02003822
Lennart Poetteringca94c442009-06-15 17:17:47 +02003823 p->sched_reset_on_fork = reset_on_fork;
3824
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 oldprio = p->prio;
Thomas Gleixner83ab0aa2010-02-17 09:05:48 +01003826 prev_class = p->sched_class;
Ingo Molnardd41f592007-07-09 18:51:59 +02003827 __setscheduler(rq, p, policy, param->sched_priority);
Dmitry Adamushkof6b53202007-10-15 17:00:08 +02003828
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07003829 if (running)
3830 p->sched_class->set_curr_task(rq);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01003831 if (on_rq)
Peter Zijlstra4ca9b722012-01-25 11:50:51 +01003832 enqueue_task(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01003833
Peter Zijlstrada7a7352011-01-17 17:03:27 +01003834 check_class_changed(rq, p, prev_class, oldprio);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02003835 task_rq_unlock(rq, p, &flags);
Ingo Molnarb29739f2006-06-27 02:54:51 -07003836
Thomas Gleixner95e02ca2006-06-27 02:55:02 -07003837 rt_mutex_adjust_pi(p);
3838
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839 return 0;
3840}
Rusty Russell961ccdd2008-06-23 13:55:38 +10003841
3842/**
3843 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3844 * @p: the task in question.
3845 * @policy: new policy.
3846 * @param: structure containing the new RT priority.
3847 *
3848 * NOTE that the task may already be dead.
3849 */
3850int sched_setscheduler(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07003851 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10003852{
3853 return __sched_setscheduler(p, policy, param, true);
3854}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855EXPORT_SYMBOL_GPL(sched_setscheduler);
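/*
 * Illustrative in-kernel usage sketch (not taken from this file; the
 * priority value is an arbitrary example): a driver thread that wants
 * soft-realtime round-robin scheduling might do:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(current, SCHED_RR, &sp))
 *		pr_warn("failed to switch to SCHED_RR\n");
 */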
3856
Rusty Russell961ccdd2008-06-23 13:55:38 +10003857/**
3858 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3859 * @p: the task in question.
3860 * @policy: new policy.
3861 * @param: structure containing the new RT priority.
3862 *
3863 * Just like sched_setscheduler, only don't bother checking if the
3864 * current context has permission. For example, this is needed in
3865 * stop_machine(): we create temporary high priority worker threads,
3866 * but our caller might not have that capability.
3867 */
3868int sched_setscheduler_nocheck(struct task_struct *p, int policy,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07003869 const struct sched_param *param)
Rusty Russell961ccdd2008-06-23 13:55:38 +10003870{
3871 return __sched_setscheduler(p, policy, param, false);
3872}
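/*
 * Hedged sketch of the stop_machine()-style case described above
 * (worker_fn and cpu are hypothetical): a freshly created kthread is
 * promoted to the top FIFO priority before being woken:
 *
 *	struct sched_param sp = { .sched_priority = MAX_RT_PRIO - 1 };
 *	struct task_struct *t = kthread_create(worker_fn, NULL, "hi/%d", cpu);
 *
 *	if (!IS_ERR(t))
 *		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 */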
3873
Ingo Molnar95cdf3b2005-09-10 00:26:11 -07003874static int
3875do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877 struct sched_param lparam;
3878 struct task_struct *p;
Ingo Molnar36c8b582006-07-03 00:25:41 -07003879 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880
3881 if (!param || pid < 0)
3882 return -EINVAL;
3883 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3884 return -EFAULT;
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07003885
3886 rcu_read_lock();
3887 retval = -ESRCH;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888 p = find_process_by_pid(pid);
Oleg Nesterov5fe1d752006-09-29 02:00:48 -07003889 if (p != NULL)
3890 retval = sched_setscheduler(p, policy, &lparam);
3891 rcu_read_unlock();
Ingo Molnar36c8b582006-07-03 00:25:41 -07003892
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 return retval;
3894}
3895
3896/**
3897 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3898 * @pid: the pid in question.
3899 * @policy: new policy.
3900 * @param: structure containing the new RT priority.
3901 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01003902SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3903 struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904{
Jason Baronc21761f2006-01-18 17:43:03 -08003905 /* negative values for policy are not valid */
3906 if (policy < 0)
3907 return -EINVAL;
3908
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 return do_sched_setscheduler(pid, policy, param);
3910}
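/*
 * Hypothetical userspace counterpart of the syscall above, via the
 * glibc wrapper (include <sched.h>):
 *
 *	struct sched_param sp = { .sched_priority = 1 };
 *
 *	if (sched_setscheduler(getpid(), SCHED_FIFO, &sp) == -1)
 *		perror("sched_setscheduler");
 */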
3911
3912/**
3913 * sys_sched_setparam - set/change the RT priority of a thread
3914 * @pid: the pid in question.
3915 * @param: structure containing the new RT priority.
3916 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01003917SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918{
3919 return do_sched_setscheduler(pid, -1, param);
3920}
3921
3922/**
3923 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3924 * @pid: the pid in question.
3925 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01003926SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927{
Ingo Molnar36c8b582006-07-03 00:25:41 -07003928 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02003929 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930
3931 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02003932 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933
3934 retval = -ESRCH;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00003935 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 p = find_process_by_pid(pid);
3937 if (p) {
3938 retval = security_task_getscheduler(p);
3939 if (!retval)
Lennart Poetteringca94c442009-06-15 17:17:47 +02003940 retval = p->policy
3941 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 }
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00003943 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 return retval;
3945}
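/*
 * Note, for illustration, that the return value above may be OR'ed
 * with SCHED_RESET_ON_FORK, so a caller comparing policies should
 * mask that bit off first:
 *
 *	int ret = sched_getscheduler(pid);
 *	int policy = ret & ~SCHED_RESET_ON_FORK;
 *	int reset_on_fork = ret & SCHED_RESET_ON_FORK;
 */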
3946
3947/**
Lennart Poetteringca94c442009-06-15 17:17:47 +02003948 * sys_sched_getparam - get the RT priority of a thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 * @pid: the pid in question.
3950 * @param: structure containing the RT priority.
3951 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01003952SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953{
3954 struct sched_param lp;
Ingo Molnar36c8b582006-07-03 00:25:41 -07003955 struct task_struct *p;
Andi Kleen3a5c3592007-10-15 17:00:14 +02003956 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957
3958 if (!param || pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02003959 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00003961 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 p = find_process_by_pid(pid);
3963 retval = -ESRCH;
3964 if (!p)
3965 goto out_unlock;
3966
3967 retval = security_task_getscheduler(p);
3968 if (retval)
3969 goto out_unlock;
3970
3971 lp.sched_priority = p->rt_priority;
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00003972 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973
3974 /*
3975 * This one might sleep, we cannot do it with a spinlock held ...
3976 */
3977 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3978
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 return retval;
3980
3981out_unlock:
Thomas Gleixner5fe85be2009-12-09 10:14:58 +00003982 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003983 return retval;
3984}
3985
Rusty Russell96f874e2008-11-25 02:35:14 +10303986long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10303988 cpumask_var_t cpus_allowed, new_mask;
Ingo Molnar36c8b582006-07-03 00:25:41 -07003989 struct task_struct *p;
3990 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991
Gautham R Shenoy95402b32008-01-25 21:08:02 +01003992 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00003993 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994
3995 p = find_process_by_pid(pid);
3996 if (!p) {
Thomas Gleixner23f5d142009-12-09 10:15:01 +00003997 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01003998 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999 return -ESRCH;
4000 }
4001
Thomas Gleixner23f5d142009-12-09 10:15:01 +00004002 /* Prevent p going away */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003 get_task_struct(p);
Thomas Gleixner23f5d142009-12-09 10:15:01 +00004004 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304006 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4007 retval = -ENOMEM;
4008 goto out_put_task;
4009 }
4010 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4011 retval = -ENOMEM;
4012 goto out_free_cpus_allowed;
4013 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 retval = -EPERM;
Linus Torvaldsc49c41a2012-01-14 18:36:33 -08004015 if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 goto out_unlock;
4017
KOSAKI Motohirob0ae1982010-10-15 04:21:18 +09004018 retval = security_task_setscheduler(p);
David Quigleye7834f82006-06-23 02:03:59 -07004019 if (retval)
4020 goto out_unlock;
4021
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304022 cpuset_cpus_allowed(p, cpus_allowed);
4023 cpumask_and(new_mask, in_mask, cpus_allowed);
Peter Zijlstra49246272010-10-17 21:46:10 +02004024again:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304025 retval = set_cpus_allowed_ptr(p, new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026
Paul Menage8707d8b2007-10-18 23:40:22 -07004027 if (!retval) {
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304028 cpuset_cpus_allowed(p, cpus_allowed);
4029 if (!cpumask_subset(new_mask, cpus_allowed)) {
Paul Menage8707d8b2007-10-18 23:40:22 -07004030 /*
4031 * We must have raced with a concurrent cpuset
4032 * update. Just reset the cpus_allowed to the
4033 * cpuset's cpus_allowed.
4034 */
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304035 cpumask_copy(new_mask, cpus_allowed);
Paul Menage8707d8b2007-10-18 23:40:22 -07004036 goto again;
4037 }
4038 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039out_unlock:
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304040 free_cpumask_var(new_mask);
4041out_free_cpus_allowed:
4042 free_cpumask_var(cpus_allowed);
4043out_put_task:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 put_task_struct(p);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004045 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 return retval;
4047}
4048
4049static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
Rusty Russell96f874e2008-11-25 02:35:14 +10304050 struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051{
Rusty Russell96f874e2008-11-25 02:35:14 +10304052 if (len < cpumask_size())
4053 cpumask_clear(new_mask);
4054 else if (len > cpumask_size())
4055 len = cpumask_size();
4056
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4058}
4059
4060/**
4061 * sys_sched_setaffinity - set the cpu affinity of a process
4062 * @pid: pid of the process
4063 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4064 * @user_mask_ptr: user-space pointer to the new cpu mask
4065 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004066SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4067 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068{
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304069 cpumask_var_t new_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 int retval;
4071
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304072 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4073 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
Rusty Russell5a16f3d2008-11-25 02:35:11 +10304075 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4076 if (retval == 0)
4077 retval = sched_setaffinity(pid, new_mask);
4078 free_cpumask_var(new_mask);
4079 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080}
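/*
 * Hypothetical userspace usage of the syscall above, via the glibc
 * wrapper (cpu_set_t and the CPU_* macros are GNU extensions from
 * <sched.h>):
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	if (sched_setaffinity(pid, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");
 */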
4081
Rusty Russell96f874e2008-11-25 02:35:14 +10304082long sched_getaffinity(pid_t pid, struct cpumask *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083{
Ingo Molnar36c8b582006-07-03 00:25:41 -07004084 struct task_struct *p;
Thomas Gleixner31605682009-12-08 20:24:16 +00004085 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004088 get_online_cpus();
Thomas Gleixner23f5d142009-12-09 10:15:01 +00004089 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
4091 retval = -ESRCH;
4092 p = find_process_by_pid(pid);
4093 if (!p)
4094 goto out_unlock;
4095
David Quigleye7834f82006-06-23 02:03:59 -07004096 retval = security_task_getscheduler(p);
4097 if (retval)
4098 goto out_unlock;
4099
Peter Zijlstra013fdb82011-04-05 17:23:45 +02004100 raw_spin_lock_irqsave(&p->pi_lock, flags);
Rusty Russell96f874e2008-11-25 02:35:14 +10304101 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
Peter Zijlstra013fdb82011-04-05 17:23:45 +02004102 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103
4104out_unlock:
Thomas Gleixner23f5d142009-12-09 10:15:01 +00004105 rcu_read_unlock();
Gautham R Shenoy95402b32008-01-25 21:08:02 +01004106 put_online_cpus();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107
Ulrich Drepper9531b622007-08-09 11:16:46 +02004108 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109}
4110
4111/**
4112 * sys_sched_getaffinity - get the cpu affinity of a process
4113 * @pid: pid of the process
4114 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4115 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4116 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004117SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4118 unsigned long __user *, user_mask_ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119{
4120 int ret;
Rusty Russellf17c8602008-11-25 02:35:11 +10304121 cpumask_var_t mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122
Anton Blanchard84fba5e2010-04-06 17:02:19 +10004123 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09004124 return -EINVAL;
4125 if (len & (sizeof(unsigned long)-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126 return -EINVAL;
4127
Rusty Russellf17c8602008-11-25 02:35:11 +10304128 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4129 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130
Rusty Russellf17c8602008-11-25 02:35:11 +10304131 ret = sched_getaffinity(pid, mask);
4132 if (ret == 0) {
KOSAKI Motohiro8bc037f2010-03-17 09:36:58 +09004133 size_t retlen = min_t(size_t, len, cpumask_size());
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09004134
4135 if (copy_to_user(user_mask_ptr, mask, retlen))
Rusty Russellf17c8602008-11-25 02:35:11 +10304136 ret = -EFAULT;
4137 else
KOSAKI Motohirocd3d8032010-03-12 16:15:36 +09004138 ret = retlen;
Rusty Russellf17c8602008-11-25 02:35:11 +10304139 }
4140 free_cpumask_var(mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141
Rusty Russellf17c8602008-11-25 02:35:11 +10304142 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004143}
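/*
 * Illustrative detail: when invoked directly, the syscall above
 * returns the number of bytes copied (retlen), not 0; the glibc
 * sched_getaffinity() wrapper hides this and returns 0 on success:
 *
 *	long n = syscall(SYS_sched_getaffinity, 0,
 *			 sizeof(cpu_set_t), &set);
 *	(n > 0 here indicates success.)
 */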
4144
4145/**
4146 * sys_sched_yield - yield the current processor to other threads.
4147 *
Ingo Molnardd41f592007-07-09 18:51:59 +02004148 * This function yields the current CPU to other tasks. If there are no
4149 * other threads running on this CPU then this function will return.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004151SYSCALL_DEFINE0(sched_yield)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152{
Ingo Molnar70b97a72006-07-03 00:25:42 -07004153 struct rq *rq = this_rq_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154
Ingo Molnar2d723762007-10-15 17:00:12 +02004155 schedstat_inc(rq, yld_count);
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02004156 current->sched_class->yield_task(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157
4158 /*
4159 * Since we are going to call schedule() anyway, there's
4160 * no need to preempt or enable interrupts:
4161 */
4162 __release(rq->lock);
Ingo Molnar8a25d5d2006-07-03 00:24:54 -07004163 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
Thomas Gleixner9828ea92009-12-03 20:55:53 +01004164 do_raw_spin_unlock(&rq->lock);
Thomas Gleixnerba74c142011-03-21 13:32:17 +01004165 sched_preempt_enable_no_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166
4167 schedule();
4168
4169 return 0;
4170}
4171
Peter Zijlstrad86ee482009-07-10 14:57:57 +02004172static inline int should_resched(void)
4173{
4174 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4175}
4176
Andrew Mortone7b38402006-06-30 01:56:00 -07004177static void __cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178{
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02004179 add_preempt_count(PREEMPT_ACTIVE);
Thomas Gleixnerc259e012011-06-22 19:47:00 +02004180 __schedule();
Frederic Weisbeckere7aaaa62009-07-16 15:44:29 +02004181 sub_preempt_count(PREEMPT_ACTIVE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182}
4183
Herbert Xu02b67cc32008-01-25 21:08:28 +01004184int __sched _cond_resched(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02004186 if (should_resched()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 __cond_resched();
4188 return 1;
4189 }
4190 return 0;
4191}
Herbert Xu02b67cc32008-01-25 21:08:28 +01004192EXPORT_SYMBOL(_cond_resched);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193
4194/*
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02004195 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 * call schedule, and on return reacquire the lock.
4197 *
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004198 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199 * operations here to prevent schedule() from being called twice (once via
4200 * spin_unlock(), once by hand).
4201 */
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02004202int __cond_resched_lock(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203{
Peter Zijlstrad86ee482009-07-10 14:57:57 +02004204 int resched = should_resched();
Jan Kara6df3cec2005-06-13 15:52:32 -07004205 int ret = 0;
4206
Peter Zijlstraf607c662009-07-20 19:16:29 +02004207 lockdep_assert_held(lock);
4208
Nick Piggin95c354f2008-01-30 13:31:20 +01004209 if (spin_needbreak(lock) || resched) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 spin_unlock(lock);
Peter Zijlstrad86ee482009-07-10 14:57:57 +02004211 if (resched)
Nick Piggin95c354f2008-01-30 13:31:20 +01004212 __cond_resched();
4213 else
4214 cpu_relax();
Jan Kara6df3cec2005-06-13 15:52:32 -07004215 ret = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216 spin_lock(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 }
Jan Kara6df3cec2005-06-13 15:52:32 -07004218 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02004220EXPORT_SYMBOL(__cond_resched_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02004222int __sched __cond_resched_softirq(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223{
4224 BUG_ON(!in_softirq());
4225
Peter Zijlstrad86ee482009-07-10 14:57:57 +02004226 if (should_resched()) {
Thomas Gleixner98d825672007-05-23 13:58:18 -07004227 local_bh_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 __cond_resched();
4229 local_bh_disable();
4230 return 1;
4231 }
4232 return 0;
4233}
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02004234EXPORT_SYMBOL(__cond_resched_softirq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236/**
4237 * yield - yield the current processor to other threads.
4238 *
Peter Zijlstra8e3fabf2012-03-06 18:54:26 +01004239 * Do not ever use this function, there's a 99% chance you're doing it wrong.
4240 *
4241 * The scheduler is at all times free to pick the calling task as the most
4242 * eligible task to run, if removing the yield() call from your code breaks
4243 * it, it's already broken.
4244 *
4245 * Typical broken usage is:
4246 *
4247 * while (!event)
4248 * yield();
4249 *
4250 * where one assumes that yield() will let 'the other' process run that will
4251 * make event true. If the current task is a SCHED_FIFO task that will never
4252 * happen. Never use yield() as a progress guarantee!!
4253 *
4254 * If you want to use yield() to wait for something, use wait_event().
4255 * If you want to use yield() to be 'nice' for others, use cond_resched().
4256 * If you still want to use yield(), do not!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 */
4258void __sched yield(void)
4259{
4260 set_current_state(TASK_RUNNING);
4261 sys_sched_yield();
4262}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263EXPORT_SYMBOL(yield);
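/*
 * For reference, the broken polling loop in the comment above would
 * be written correctly with the waitqueue API, roughly (names are
 * illustrative):
 *
 *	DECLARE_WAIT_QUEUE_HEAD(wq);
 *
 *	consumer:	wait_event(wq, event);
 *	producer:	event = true; wake_up(&wq);
 */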
4264
Mike Galbraithd95f4122011-02-01 09:50:51 -05004265/**
4266 * yield_to - yield the current processor to another thread in
4267 * your thread group, or accelerate that thread toward the
4268 * processor it's on.
Randy Dunlap16addf92011-03-18 09:34:53 -07004269 * @p: target task
4270 * @preempt: whether task preemption is allowed or not
Mike Galbraithd95f4122011-02-01 09:50:51 -05004271 *
4272 * It's the caller's job to ensure that the target task struct
4273 * can't go away on us before we can do any checks.
4274 *
4275 * Returns true if we indeed boosted the target task.
4276 */
4277bool __sched yield_to(struct task_struct *p, bool preempt)
4278{
4279 struct task_struct *curr = current;
4280 struct rq *rq, *p_rq;
4281 unsigned long flags;
4282 bool yielded = false;
4283
4284 local_irq_save(flags);
4285 rq = this_rq();
4286
4287again:
4288 p_rq = task_rq(p);
4289 double_rq_lock(rq, p_rq);
4290 while (task_rq(p) != p_rq) {
4291 double_rq_unlock(rq, p_rq);
4292 goto again;
4293 }
4294
4295 if (!curr->sched_class->yield_to_task)
4296 goto out;
4297
4298 if (curr->sched_class != p->sched_class)
4299 goto out;
4300
4301 if (task_running(p_rq, p) || p->state)
4302 goto out;
4303
4304 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08004305 if (yielded) {
Mike Galbraithd95f4122011-02-01 09:50:51 -05004306 schedstat_inc(rq, yld_count);
Venkatesh Pallipadi6d1cafd2011-03-01 16:28:21 -08004307 /*
4308 * Make p's CPU reschedule; pick_next_entity takes care of
4309 * fairness.
4310 */
4311 if (preempt && rq != p_rq)
4312 resched_task(p_rq->curr);
4313 }
Mike Galbraithd95f4122011-02-01 09:50:51 -05004314
4315out:
4316 double_rq_unlock(rq, p_rq);
4317 local_irq_restore(flags);
4318
4319 if (yielded)
4320 schedule();
4321
4322 return yielded;
4323}
4324EXPORT_SYMBOL_GPL(yield_to);
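/*
 * Hedged usage sketch (modeled loosely on how a hypervisor might
 * boost a lock-holding sibling vcpu; pick_lock_holder() is a
 * hypothetical helper, and the reference pinning follows the rule
 * documented above):
 *
 *	struct task_struct *task = pick_lock_holder();
 *
 *	get_task_struct(task);
 *	yield_to(task, true);
 *	put_task_struct(task);
 */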
4325
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004327 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 * that process accounting knows that this is a task in IO wait state.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 */
4330void __sched io_schedule(void)
4331{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09004332 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333
Shailabh Nagar0ff92242006-07-14 00:24:37 -07004334 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01004336 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07004337 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338 schedule();
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07004339 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07004341 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343EXPORT_SYMBOL(io_schedule);
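/*
 * Usage sketch (assuming the caller is already queued on some
 * waitqueue wq, as block-layer sleepers are):
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *	io_schedule();
 *	finish_wait(&wq, &wait);
 *
 * Using io_schedule() instead of plain schedule() keeps the iowait
 * accounting above accurate.
 */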
4344
4345long __sched io_schedule_timeout(long timeout)
4346{
Hitoshi Mitake54d35f22009-06-29 14:44:57 +09004347 struct rq *rq = raw_rq();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 long ret;
4349
Shailabh Nagar0ff92242006-07-14 00:24:37 -07004350 delayacct_blkio_start();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351 atomic_inc(&rq->nr_iowait);
Jens Axboe73c10102011-03-08 13:19:51 +01004352 blk_flush_plug(current);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07004353 current->in_iowait = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354 ret = schedule_timeout(timeout);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07004355 current->in_iowait = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 atomic_dec(&rq->nr_iowait);
Shailabh Nagar0ff92242006-07-14 00:24:37 -07004357 delayacct_blkio_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358 return ret;
4359}
4360
4361/**
4362 * sys_sched_get_priority_max - return maximum RT priority.
4363 * @policy: scheduling class.
4364 *
4365 * this syscall returns the maximum rt_priority that can be used
4366 * by a given scheduling class.
4367 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004368SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369{
4370 int ret = -EINVAL;
4371
4372 switch (policy) {
4373 case SCHED_FIFO:
4374 case SCHED_RR:
4375 ret = MAX_USER_RT_PRIO-1;
4376 break;
4377 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08004378 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02004379 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 ret = 0;
4381 break;
4382 }
4383 return ret;
4384}
4385
4386/**
4387 * sys_sched_get_priority_min - return minimum RT priority.
4388 * @policy: scheduling class.
4389 *
4390 * this syscall returns the minimum rt_priority that can be used
4391 * by a given scheduling class.
4392 */
Heiko Carstens5add95d2009-01-14 14:14:08 +01004393SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394{
4395 int ret = -EINVAL;
4396
4397 switch (policy) {
4398 case SCHED_FIFO:
4399 case SCHED_RR:
4400 ret = 1;
4401 break;
4402 case SCHED_NORMAL:
Ingo Molnarb0a94992006-01-14 13:20:41 -08004403 case SCHED_BATCH:
Ingo Molnardd41f592007-07-09 18:51:59 +02004404 case SCHED_IDLE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405 ret = 0;
4406 }
4407 return ret;
4408}
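/*
 * Illustrative userspace probe of the range implied by the two
 * syscalls above (on this kernel, 1..MAX_USER_RT_PRIO-1, i.e. 1..99
 * for SCHED_FIFO):
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);
 */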
4409
4410/**
4411 * sys_sched_rr_get_interval - return the default timeslice of a process.
4412 * @pid: pid of the process.
4413 * @interval: userspace pointer to the timeslice value.
4414 *
4415 * this syscall writes the default timeslice value of a given process
4416 * into the user-space timespec buffer. A value of '0' means infinity.
4417 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01004418SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
Heiko Carstens754fe8d2009-01-14 14:14:09 +01004419 struct timespec __user *, interval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420{
Ingo Molnar36c8b582006-07-03 00:25:41 -07004421 struct task_struct *p;
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02004422 unsigned int time_slice;
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01004423 unsigned long flags;
4424 struct rq *rq;
Andi Kleen3a5c3592007-10-15 17:00:14 +02004425 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426 struct timespec t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427
4428 if (pid < 0)
Andi Kleen3a5c3592007-10-15 17:00:14 +02004429 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004430
4431 retval = -ESRCH;
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00004432 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433 p = find_process_by_pid(pid);
4434 if (!p)
4435 goto out_unlock;
4436
4437 retval = security_task_getscheduler(p);
4438 if (retval)
4439 goto out_unlock;
4440
Thomas Gleixnerdba091b2009-12-09 09:32:03 +01004441 rq = task_rq_lock(p, &flags);
4442 time_slice = p->sched_class->get_rr_interval(rq, p);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004443 task_rq_unlock(rq, p, &flags);
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02004444
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00004445 rcu_read_unlock();
Dmitry Adamushkoa4ec24b2007-10-15 17:00:13 +02004446 jiffies_to_timespec(time_slice, &t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448 return retval;
Andi Kleen3a5c3592007-10-15 17:00:14 +02004449
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450out_unlock:
Thomas Gleixner1a551ae2009-12-09 10:15:11 +00004451 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452 return retval;
4453}
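/*
 * Hypothetical userspace usage (pid 0 queries the calling thread):
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */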
4454
Steven Rostedt7c731e02008-05-12 21:20:41 +02004455static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
Ingo Molnar36c8b582006-07-03 00:25:41 -07004456
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01004457void sched_show_task(struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459 unsigned long free = 0;
Ingo Molnar36c8b582006-07-03 00:25:41 -07004460 unsigned state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 state = p->state ? __ffs(p->state) + 1 : 0;
Erik Gilling28d06862010-11-19 18:08:51 -08004463 printk(KERN_INFO "%-15.15s %c", p->comm,
Andreas Mohr2ed6e342006-07-10 04:43:52 -07004464 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
Ingo Molnar4bd77322007-07-11 21:21:47 +02004465#if BITS_PER_LONG == 32
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004467 printk(KERN_CONT " running ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004469 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470#else
4471 if (state == TASK_RUNNING)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004472 printk(KERN_CONT " running task ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473 else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004474 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475#endif
4476#ifdef CONFIG_DEBUG_STACK_USAGE
Eric Sandeen7c9f8862008-04-22 16:38:23 -05004477 free = stack_not_used(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478#endif
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004479 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
Kees Cook07cde262011-12-15 08:49:18 -08004480 task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
David Rientjesaa47b7e2009-05-04 01:38:05 -07004481 (unsigned long)task_thread_info(p)->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482
Nick Piggin5fb5e6d2008-01-25 21:08:34 +01004483 show_stack(p, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484}
4485
Ingo Molnare59e2ae2006-12-06 20:35:59 -08004486void show_state_filter(unsigned long state_filter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487{
Ingo Molnar36c8b582006-07-03 00:25:41 -07004488 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489
Ingo Molnar4bd77322007-07-11 21:21:47 +02004490#if BITS_PER_LONG == 32
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004491 printk(KERN_INFO
4492 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493#else
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01004494 printk(KERN_INFO
4495 " task PC stack pid father\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496#endif
Thomas Gleixner510f5ac2011-07-17 20:47:54 +02004497 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 do_each_thread(g, p) {
4499 /*
4500 * reset the NMI-timeout, listing all tasks on a slow
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004501 * console might take a lot of time:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 */
4503 touch_nmi_watchdog();
Ingo Molnar39bc89f2007-04-25 20:50:03 -07004504 if (!state_filter || (p->state & state_filter))
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01004505 sched_show_task(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 } while_each_thread(g, p);
4507
Jeremy Fitzhardinge04c91672007-05-08 00:28:05 -07004508 touch_all_softlockup_watchdogs();
4509
Ingo Molnardd41f592007-07-09 18:51:59 +02004510#ifdef CONFIG_SCHED_DEBUG
4511 sysrq_sched_debug_show();
4512#endif
Thomas Gleixner510f5ac2011-07-17 20:47:54 +02004513 rcu_read_unlock();
Ingo Molnare59e2ae2006-12-06 20:35:59 -08004514 /*
4515 * Only show locks if all tasks are dumped:
4516 */
Shmulik Ladkani93335a22009-11-25 15:23:41 +02004517 if (!state_filter)
Ingo Molnare59e2ae2006-12-06 20:35:59 -08004518 debug_show_all_locks();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519}
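/*
 * Illustrative call (assumption about a caller, not code from this
 * file): a hung-task style debug path could dump only uninterruptible
 * tasks with
 *
 *	show_state_filter(TASK_UNINTERRUPTIBLE);
 *
 * whereas passing 0 dumps every task and, per the comment above, the
 * held locks as well.
 */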
4520
Ingo Molnar1df21052007-07-09 18:51:58 +02004521void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4522{
Ingo Molnardd41f592007-07-09 18:51:59 +02004523 idle->sched_class = &idle_sched_class;
Ingo Molnar1df21052007-07-09 18:51:58 +02004524}
4525
Ingo Molnarf340c0d2005-06-28 16:40:42 +02004526/**
4527 * init_idle - set up an idle thread for a given CPU
4528 * @idle: task in question
4529 * @cpu: cpu the idle task belongs to
4530 *
4531 * NOTE: this function does not set the idle thread's NEED_RESCHED
4532 * flag, to make booting more robust.
4533 */
Nick Piggin5c1e1762006-10-03 01:14:04 -07004534void __cpuinit init_idle(struct task_struct *idle, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535{
Ingo Molnar70b97a72006-07-03 00:25:42 -07004536 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537 unsigned long flags;
4538
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004539 raw_spin_lock_irqsave(&rq->lock, flags);
Ingo Molnar5cbd54e2008-11-12 20:05:50 +01004540
Ingo Molnardd41f592007-07-09 18:51:59 +02004541 __sched_fork(idle);
Peter Zijlstra06b83b52009-12-16 18:04:35 +01004542 idle->state = TASK_RUNNING;
Ingo Molnardd41f592007-07-09 18:51:59 +02004543 idle->se.exec_start = sched_clock();
4544
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09004545 do_set_cpus_allowed(idle, cpumask_of(cpu));
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02004546 /*
4547 * We're having a chicken and egg problem, even though we are
4548 * holding rq->lock, the cpu isn't yet set to this cpu so the
4549 * lockdep check in task_group() will fail.
4550 *
4551 * Similar case to sched_fork(). Alternatively we could
4552 * use task_rq_lock() here and obtain the other rq->lock.
4553 *
4554 * Silence PROVE_RCU
4555 */
4556 rcu_read_lock();
Ingo Molnardd41f592007-07-09 18:51:59 +02004557 __set_task_cpu(idle, cpu);
Peter Zijlstra6506cf6c2010-09-16 17:50:31 +02004558 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 rq->curr = rq->idle = idle;
Peter Zijlstra3ca7a442011-04-05 17:23:40 +02004561#if defined(CONFIG_SMP)
4562 idle->on_cpu = 1;
Nick Piggin4866cde2005-06-25 14:57:23 -07004563#endif
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004564 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565
4566 /* Set the preempt count _outside_ the spinlocks! */
Al Viroa1261f52005-11-13 16:06:55 -08004567 task_thread_info(idle)->preempt_count = 0;
Jonathan Corbet625f2a32011-04-22 11:19:10 -06004568
Ingo Molnardd41f592007-07-09 18:51:59 +02004569 /*
4570 * The idle tasks have their own, simple scheduling class:
4571 */
4572 idle->sched_class = &idle_sched_class;
Steven Rostedt868baf02011-02-10 21:26:13 -05004573 ftrace_graph_init_idle_task(idle, cpu);
Carsten Emdef1c6f1a2011-10-26 23:14:16 +02004574#if defined(CONFIG_SMP)
4575 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4576#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577}
4578
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579#ifdef CONFIG_SMP
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09004580void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4581{
4582 if (p->sched_class && p->sched_class->set_cpus_allowed)
4583 p->sched_class->set_cpus_allowed(p, new_mask);
Peter Zijlstra49396022011-06-25 15:45:46 +02004584
4585 cpumask_copy(&p->cpus_allowed, new_mask);
Peter Zijlstra29baa742012-04-23 12:11:21 +02004586 p->nr_cpus_allowed = cpumask_weight(new_mask);
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09004587}
4588
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589/*
4590 * This is how migration works:
4591 *
Tejun Heo969c7922010-05-06 18:49:21 +02004592 * 1) we invoke migration_cpu_stop() on the target CPU using
4593 * stop_one_cpu().
4594 * 2) stopper starts to run (implicitly forcing the migrated thread
4595 * off the CPU)
4596 * 3) it checks whether the migrated task is still in the wrong runqueue.
4597 * 4) if it's in the wrong runqueue then the migration thread removes
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 * it and puts it into the right queue.
Tejun Heo969c7922010-05-06 18:49:21 +02004599 * 5) stopper completes and stop_one_cpu() returns and the migration
4600 * is done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 */
4602
4603/*
4604 * Change a given task's CPU affinity. Migrate the thread to a
4605 * proper CPU and schedule it away if the CPU it's executing on
4606 * is removed from the allowed bitmask.
4607 *
4608 * NOTE: the caller must have a valid reference to the task; the
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004609 * task must not exit() & deallocate itself prematurely. The
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610 * call is not atomic; no spinlocks may be held.
4611 */
Rusty Russell96f874e2008-11-25 02:35:14 +10304612int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004613{
4614 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07004615 struct rq *rq;
Tejun Heo969c7922010-05-06 18:49:21 +02004616 unsigned int dest_cpu;
Ingo Molnar48f24c42006-07-03 00:25:40 -07004617 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618
4619 rq = task_rq_lock(p, &flags);
Peter Zijlstrae2912002009-12-16 18:04:36 +01004620
Yong Zhangdb44fc02011-05-09 22:07:05 +08004621 if (cpumask_equal(&p->cpus_allowed, new_mask))
4622 goto out;
4623
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01004624 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 ret = -EINVAL;
4626 goto out;
4627 }
4628
Yong Zhangdb44fc02011-05-09 22:07:05 +08004629 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
David Rientjes9985b0b2008-06-05 12:57:11 -07004630 ret = -EINVAL;
4631 goto out;
4632 }
4633
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09004634 do_set_cpus_allowed(p, new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01004635
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636 /* Can the task run on the task's current CPU? If so, we're done */
Rusty Russell96f874e2008-11-25 02:35:14 +10304637 if (cpumask_test_cpu(task_cpu(p), new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638 goto out;
4639
Tejun Heo969c7922010-05-06 18:49:21 +02004640 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
Peter Zijlstrabd8e7dd2011-04-05 17:23:59 +02004641 if (p->on_rq) {
Tejun Heo969c7922010-05-06 18:49:21 +02004642 struct migration_arg arg = { p, dest_cpu };
Linus Torvalds1da177e2005-04-16 15:20:36 -07004643 /* Need help from migration thread: drop lock and wait. */
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004644 task_rq_unlock(rq, p, &flags);
Tejun Heo969c7922010-05-06 18:49:21 +02004645 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646 tlb_migrate_finish(p->mm);
4647 return 0;
4648 }
4649out:
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004650 task_rq_unlock(rq, p, &flags);
Ingo Molnar48f24c42006-07-03 00:25:40 -07004651
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652 return ret;
4653}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07004654EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
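/*
 * Minimal in-kernel usage sketch: restricting a task to a single CPU,
 * a common pattern for per-cpu kthreads (cpu is assumed valid):
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		pr_warn("could not bind task to CPU %d\n", cpu);
 */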
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655
4656/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004657 * Move a task (which must not be current) off this cpu, onto the dest cpu. We're doing
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 * this because either it can't run here any more (set_cpus_allowed()
4659 * away from this CPU, or CPU going down), or because we're
4660 * attempting to rebalance this task on exec (sched_exec).
4661 *
4662 * So we race with normal scheduler movements, but that's OK, as long
4663 * as the task is no longer on this CPU.
Kirill Korotaevefc30812006-06-27 02:54:32 -07004664 *
4665 * Returns non-zero if task was successfully migrated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666 */
Kirill Korotaevefc30812006-06-27 02:54:32 -07004667static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004668{
Ingo Molnar70b97a72006-07-03 00:25:42 -07004669 struct rq *rq_dest, *rq_src;
Peter Zijlstrae2912002009-12-16 18:04:36 +01004670 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
Max Krasnyanskye761b772008-07-15 04:43:49 -07004672 if (unlikely(!cpu_active(dest_cpu)))
Kirill Korotaevefc30812006-06-27 02:54:32 -07004673 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004674
4675 rq_src = cpu_rq(src_cpu);
4676 rq_dest = cpu_rq(dest_cpu);
4677
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004678 raw_spin_lock(&p->pi_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679 double_rq_lock(rq_src, rq_dest);
4680 /* Already moved. */
4681 if (task_cpu(p) != src_cpu)
Linus Torvaldsb1e38732008-07-10 11:25:03 -07004682 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683 /* Affinity changed (again). */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004684 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
Linus Torvaldsb1e38732008-07-10 11:25:03 -07004685 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686
Peter Zijlstrae2912002009-12-16 18:04:36 +01004687 /*
4688 * If we're not on a rq, the next wake-up will ensure we're
4689 * placed properly.
4690 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02004691 if (p->on_rq) {
Peter Zijlstra4ca9b722012-01-25 11:50:51 +01004692 dequeue_task(rq_src, p, 0);
Peter Zijlstrae2912002009-12-16 18:04:36 +01004693 set_task_cpu(p, dest_cpu);
Peter Zijlstra4ca9b722012-01-25 11:50:51 +01004694 enqueue_task(rq_dest, p, 0);
Peter Zijlstra15afe092008-09-20 23:38:02 +02004695 check_preempt_curr(rq_dest, p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 }
Linus Torvaldsb1e38732008-07-10 11:25:03 -07004697done:
Kirill Korotaevefc30812006-06-27 02:54:32 -07004698 ret = 1;
Linus Torvaldsb1e38732008-07-10 11:25:03 -07004699fail:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700 double_rq_unlock(rq_src, rq_dest);
Peter Zijlstra0122ec52011-04-05 17:23:51 +02004701 raw_spin_unlock(&p->pi_lock);
Kirill Korotaevefc30812006-06-27 02:54:32 -07004702 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703}
4704
4705/*
Tejun Heo969c7922010-05-06 18:49:21 +02004706 * migration_cpu_stop - this will be executed by a highprio stopper thread
 4707 * and performs thread migration by bumping the thread off its CPU and
 4708 * then 'pushing' it onto another runqueue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709 */
Tejun Heo969c7922010-05-06 18:49:21 +02004710static int migration_cpu_stop(void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711{
Tejun Heo969c7922010-05-06 18:49:21 +02004712 struct migration_arg *arg = data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713
Tejun Heo969c7922010-05-06 18:49:21 +02004714 /*
4715 * The original target cpu might have gone down and we might
4716 * be on another cpu but it doesn't matter.
4717 */
4718 local_irq_disable();
4719 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
4720 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721 return 0;
4722}
4723
4724#ifdef CONFIG_HOTPLUG_CPU
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725
Ingo Molnar48f24c42006-07-03 00:25:40 -07004726/*
4727 * Ensures that the idle task is using init_mm right before its cpu goes
Linus Torvalds1da177e2005-04-16 15:20:36 -07004728 * offline.
4729 */
4730void idle_task_exit(void)
4731{
4732 struct mm_struct *mm = current->active_mm;
4733
4734 BUG_ON(cpu_online(smp_processor_id()));
4735
4736 if (mm != &init_mm)
4737 switch_mm(mm, &init_mm, current);
4738 mmdrop(mm);
4739}
4740
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01004741/*
Peter Zijlstraf319da02012-08-20 11:26:57 +02004742 * Since this CPU is going 'away' for a while, fold any nr_active delta
4743 * we might have. Assumes we're called after migrate_tasks() so that the
4744 * nr_active count is stable.
4745 *
4746 * Also see the comment "Global load-average calculations".
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01004747 */
Peter Zijlstraf319da02012-08-20 11:26:57 +02004748static void calc_load_migrate(struct rq *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749{
Peter Zijlstraf319da02012-08-20 11:26:57 +02004750 long delta = calc_load_fold_active(rq);
4751 if (delta)
4752 atomic_long_add(delta, &calc_load_tasks);
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02004753}
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01004754
4755/*
4756 * Migrate all tasks from the rq; sleeping tasks will be migrated by
4757 * try_to_wake_up()->select_task_rq().
4758 *
 4759 * Called with rq->lock held even though we're in stop_machine() and
 4760 * no concurrency is possible; we hold the required locks anyway
4761 * because of lock validation efforts.
4762 */
4763static void migrate_tasks(unsigned int dead_cpu)
4764{
4765 struct rq *rq = cpu_rq(dead_cpu);
4766 struct task_struct *next, *stop = rq->stop;
4767 int dest_cpu;
4768
4769 /*
4770 * Fudge the rq selection such that the below task selection loop
4771 * doesn't get stuck on the currently eligible stop task.
4772 *
4773 * We're currently inside stop_machine() and the rq is either stuck
4774 * in the stop_machine_cpu_stop() loop, or we're executing this code,
4775 * either way we should never end up calling schedule() until we're
4776 * done here.
4777 */
4778 rq->stop = NULL;
4779
4780 for ( ; ; ) {
4781 /*
4782 * There's this thread running; bail when that's the only
4783 * remaining thread.
4784 */
4785 if (rq->nr_running == 1)
4786 break;
4787
4788 next = pick_next_task(rq);
4789 BUG_ON(!next);
4790 next->sched_class->put_prev_task(rq, next);
4791
4792 /* Find suitable destination for @next, with force if needed. */
4793 dest_cpu = select_fallback_rq(dead_cpu, next);
4794 raw_spin_unlock(&rq->lock);
4795
4796 __migrate_task(next, dead_cpu, dest_cpu);
4797
4798 raw_spin_lock(&rq->lock);
4799 }
4800
4801 rq->stop = stop;
4802}
4803
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804#endif /* CONFIG_HOTPLUG_CPU */
4805
Nick Piggine692ab52007-07-26 13:40:43 +02004806#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4807
4808static struct ctl_table sd_ctl_dir[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02004809 {
4810 .procname = "sched_domain",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02004811 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02004812 },
Eric W. Biederman56992302009-11-05 15:38:40 -08004813 {}
Nick Piggine692ab52007-07-26 13:40:43 +02004814};
4815
4816static struct ctl_table sd_ctl_root[] = {
Alexey Dobriyane0361852007-08-09 11:16:46 +02004817 {
4818 .procname = "kernel",
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02004819 .mode = 0555,
Alexey Dobriyane0361852007-08-09 11:16:46 +02004820 .child = sd_ctl_dir,
4821 },
Eric W. Biederman56992302009-11-05 15:38:40 -08004822 {}
Nick Piggine692ab52007-07-26 13:40:43 +02004823};
4824
4825static struct ctl_table *sd_alloc_ctl_entry(int n)
4826{
4827 struct ctl_table *entry =
Milton Miller5cf9f062007-10-15 17:00:19 +02004828 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
Nick Piggine692ab52007-07-26 13:40:43 +02004829
Nick Piggine692ab52007-07-26 13:40:43 +02004830 return entry;
4831}
4832
Milton Miller6382bc92007-10-15 17:00:19 +02004833static void sd_free_ctl_entry(struct ctl_table **tablep)
4834{
Milton Millercd7900762007-10-17 16:55:11 +02004835 struct ctl_table *entry;
Milton Miller6382bc92007-10-15 17:00:19 +02004836
Milton Millercd7900762007-10-17 16:55:11 +02004837 /*
4838 * In the intermediate directories, both the child directory and
4839 * procname are dynamically allocated and could fail but the mode
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01004840 * will always be set. In the lowest directory the names are
Milton Millercd7900762007-10-17 16:55:11 +02004841 * static strings and all have proc handlers.
4842 */
4843 for (entry = *tablep; entry->mode; entry++) {
Milton Miller6382bc92007-10-15 17:00:19 +02004844 if (entry->child)
4845 sd_free_ctl_entry(&entry->child);
Milton Millercd7900762007-10-17 16:55:11 +02004846 if (entry->proc_handler == NULL)
4847 kfree(entry->procname);
4848 }
Milton Miller6382bc92007-10-15 17:00:19 +02004849
4850 kfree(*tablep);
4851 *tablep = NULL;
4852}
4853
Namhyung Kim201c3732012-08-16 17:03:24 +09004854static int min_load_idx = 0;
4855static int max_load_idx = CPU_LOAD_IDX_MAX;
4856
Nick Piggine692ab52007-07-26 13:40:43 +02004857static void
Alexey Dobriyane0361852007-08-09 11:16:46 +02004858set_table_entry(struct ctl_table *entry,
Nick Piggine692ab52007-07-26 13:40:43 +02004859 const char *procname, void *data, int maxlen,
Namhyung Kim201c3732012-08-16 17:03:24 +09004860 umode_t mode, proc_handler *proc_handler,
4861 bool load_idx)
Nick Piggine692ab52007-07-26 13:40:43 +02004862{
Nick Piggine692ab52007-07-26 13:40:43 +02004863 entry->procname = procname;
4864 entry->data = data;
4865 entry->maxlen = maxlen;
4866 entry->mode = mode;
4867 entry->proc_handler = proc_handler;
Namhyung Kim201c3732012-08-16 17:03:24 +09004868
4869 if (load_idx) {
4870 entry->extra1 = &min_load_idx;
4871 entry->extra2 = &max_load_idx;
4872 }
Nick Piggine692ab52007-07-26 13:40:43 +02004873}
4874
4875static struct ctl_table *
4876sd_alloc_ctl_domain_table(struct sched_domain *sd)
4877{
Ingo Molnara5d8c342008-10-09 11:35:51 +02004878 struct ctl_table *table = sd_alloc_ctl_entry(13);
Nick Piggine692ab52007-07-26 13:40:43 +02004879
Milton Millerad1cdc12007-10-15 17:00:19 +02004880 if (table == NULL)
4881 return NULL;
4882
Alexey Dobriyane0361852007-08-09 11:16:46 +02004883 set_table_entry(&table[0], "min_interval", &sd->min_interval,
Namhyung Kim201c3732012-08-16 17:03:24 +09004884 sizeof(long), 0644, proc_doulongvec_minmax, false);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004885 set_table_entry(&table[1], "max_interval", &sd->max_interval,
Namhyung Kim201c3732012-08-16 17:03:24 +09004886 sizeof(long), 0644, proc_doulongvec_minmax, false);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004887 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
Namhyung Kim201c3732012-08-16 17:03:24 +09004888 sizeof(int), 0644, proc_dointvec_minmax, true);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004889 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
Namhyung Kim201c3732012-08-16 17:03:24 +09004890 sizeof(int), 0644, proc_dointvec_minmax, true);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004891 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
Namhyung Kim201c3732012-08-16 17:03:24 +09004892 sizeof(int), 0644, proc_dointvec_minmax, true);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004893 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
Namhyung Kim201c3732012-08-16 17:03:24 +09004894 sizeof(int), 0644, proc_dointvec_minmax, true);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004895 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
Namhyung Kim201c3732012-08-16 17:03:24 +09004896 sizeof(int), 0644, proc_dointvec_minmax, true);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004897 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
Namhyung Kim201c3732012-08-16 17:03:24 +09004898 sizeof(int), 0644, proc_dointvec_minmax, false);
Alexey Dobriyane0361852007-08-09 11:16:46 +02004899 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
Namhyung Kim201c3732012-08-16 17:03:24 +09004900 sizeof(int), 0644, proc_dointvec_minmax, false);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02004901 set_table_entry(&table[9], "cache_nice_tries",
Nick Piggine692ab52007-07-26 13:40:43 +02004902 &sd->cache_nice_tries,
Namhyung Kim201c3732012-08-16 17:03:24 +09004903 sizeof(int), 0644, proc_dointvec_minmax, false);
Zou Nan haiace8b3d2007-10-15 17:00:14 +02004904 set_table_entry(&table[10], "flags", &sd->flags,
Namhyung Kim201c3732012-08-16 17:03:24 +09004905 sizeof(int), 0644, proc_dointvec_minmax, false);
Ingo Molnara5d8c342008-10-09 11:35:51 +02004906 set_table_entry(&table[11], "name", sd->name,
Namhyung Kim201c3732012-08-16 17:03:24 +09004907 CORENAME_MAX_SIZE, 0444, proc_dostring, false);
Ingo Molnara5d8c342008-10-09 11:35:51 +02004908 /* &table[12] is terminator */
Nick Piggine692ab52007-07-26 13:40:43 +02004909
4910 return table;
4911}
4912
Ingo Molnar9a4e7152007-11-28 15:52:56 +01004913static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
Nick Piggine692ab52007-07-26 13:40:43 +02004914{
4915 struct ctl_table *entry, *table;
4916 struct sched_domain *sd;
4917 int domain_num = 0, i;
4918 char buf[32];
4919
4920 for_each_domain(cpu, sd)
4921 domain_num++;
4922 entry = table = sd_alloc_ctl_entry(domain_num + 1);
Milton Millerad1cdc12007-10-15 17:00:19 +02004923 if (table == NULL)
4924 return NULL;
Nick Piggine692ab52007-07-26 13:40:43 +02004925
4926 i = 0;
4927 for_each_domain(cpu, sd) {
4928 snprintf(buf, 32, "domain%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02004929 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02004930 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02004931 entry->child = sd_alloc_ctl_domain_table(sd);
4932 entry++;
4933 i++;
4934 }
4935 return table;
4936}
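/*
 * A sketch of the resulting layout (illustrative; exact entries depend
 * on the configuration and on the sd_ctl_root/sd_ctl_dir setup earlier
 * in this file).  The per-cpu, per-domain tables built above surface
 * as:
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/max_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/busy_idx
 *   ...
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/...
 *   /proc/sys/kernel/sched_domain/cpu1/domain0/...
 *
 * with one file per set_table_entry() call in
 * sd_alloc_ctl_domain_table().
 */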
4937
4938static struct ctl_table_header *sd_sysctl_header;
Milton Miller6382bc92007-10-15 17:00:19 +02004939static void register_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02004940{
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01004941 int i, cpu_num = num_possible_cpus();
Nick Piggine692ab52007-07-26 13:40:43 +02004942 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4943 char buf[32];
4944
Milton Miller73785472007-10-24 18:23:48 +02004945 WARN_ON(sd_ctl_dir[0].child);
4946 sd_ctl_dir[0].child = entry;
4947
Milton Millerad1cdc12007-10-15 17:00:19 +02004948 if (entry == NULL)
4949 return;
4950
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01004951 for_each_possible_cpu(i) {
Nick Piggine692ab52007-07-26 13:40:43 +02004952 snprintf(buf, 32, "cpu%d", i);
Nick Piggine692ab52007-07-26 13:40:43 +02004953 entry->procname = kstrdup(buf, GFP_KERNEL);
Eric W. Biedermanc57baf12007-08-23 15:18:02 +02004954 entry->mode = 0555;
Nick Piggine692ab52007-07-26 13:40:43 +02004955 entry->child = sd_alloc_ctl_cpu_table(i);
Milton Miller97b6ea72007-10-15 17:00:19 +02004956 entry++;
Nick Piggine692ab52007-07-26 13:40:43 +02004957 }
Milton Miller73785472007-10-24 18:23:48 +02004958
4959 WARN_ON(sd_sysctl_header);
Nick Piggine692ab52007-07-26 13:40:43 +02004960 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
4961}
Milton Miller6382bc92007-10-15 17:00:19 +02004962
Milton Miller73785472007-10-24 18:23:48 +02004963/* may be called multiple times per register */
Milton Miller6382bc92007-10-15 17:00:19 +02004964static void unregister_sched_domain_sysctl(void)
4965{
Milton Miller73785472007-10-24 18:23:48 +02004966 if (sd_sysctl_header)
4967 unregister_sysctl_table(sd_sysctl_header);
Milton Miller6382bc92007-10-15 17:00:19 +02004968 sd_sysctl_header = NULL;
Milton Miller73785472007-10-24 18:23:48 +02004969 if (sd_ctl_dir[0].child)
4970 sd_free_ctl_entry(&sd_ctl_dir[0].child);
Milton Miller6382bc92007-10-15 17:00:19 +02004971}
Nick Piggine692ab52007-07-26 13:40:43 +02004972#else
Milton Miller6382bc92007-10-15 17:00:19 +02004973static void register_sched_domain_sysctl(void)
4974{
4975}
4976static void unregister_sched_domain_sysctl(void)
Nick Piggine692ab52007-07-26 13:40:43 +02004977{
4978}
4979#endif
4980
Gregory Haskins1f11eb62008-06-04 15:04:05 -04004981static void set_rq_online(struct rq *rq)
4982{
4983 if (!rq->online) {
4984 const struct sched_class *class;
4985
Rusty Russellc6c49272008-11-25 02:35:05 +10304986 cpumask_set_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04004987 rq->online = 1;
4988
4989 for_each_class(class) {
4990 if (class->rq_online)
4991 class->rq_online(rq);
4992 }
4993 }
4994}
4995
4996static void set_rq_offline(struct rq *rq)
4997{
4998 if (rq->online) {
4999 const struct sched_class *class;
5000
5001 for_each_class(class) {
5002 if (class->rq_offline)
5003 class->rq_offline(rq);
5004 }
5005
Rusty Russellc6c49272008-11-25 02:35:05 +10305006 cpumask_clear_cpu(rq->cpu, rq->rd->online);
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005007 rq->online = 0;
5008 }
5009}
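/*
 * A sketch of what the for_each_class() iteration above amounts to:
 * classes are walked from highest to lowest priority (stop, rt, fair,
 * idle in this kernel), so set_rq_online() effectively does:
 *
 *   rt_sched_class.rq_online(rq);    (RT overload bookkeeping)
 *   fair_sched_class.rq_online(rq);  (load-balance bookkeeping)
 *
 * skipping any class that does not implement the hook.  Which classes
 * implement rq_online/rq_offline is a detail of the individual class
 * code, not guaranteed here.
 */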
5010
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011/*
5012 * migration_call - callback that gets triggered when a CPU is added.
5013 * Here we can start up the necessary migration thread for the new CPU.
5014 */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005015static int __cpuinit
5016migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005017{
Ingo Molnar48f24c42006-07-03 00:25:40 -07005018 int cpu = (long)hcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 unsigned long flags;
Tejun Heo969c7922010-05-06 18:49:21 +02005020 struct rq *rq = cpu_rq(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005021
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005022 switch (action & ~CPU_TASKS_FROZEN) {
Gautham R Shenoy5be93612007-05-09 02:34:04 -07005023
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024 case CPU_UP_PREPARE:
Thomas Gleixnera468d382009-07-17 14:15:46 +02005025 rq->calc_load_update = calc_load_update;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005027
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 case CPU_ONLINE:
Gregory Haskins1f94ef52008-03-10 16:52:41 -04005029 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005030 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04005031 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10305032 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005033
5034 set_rq_online(rq);
Gregory Haskins1f94ef52008-03-10 16:52:41 -04005035 }
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005036 raw_spin_unlock_irqrestore(&rq->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037 break;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005038
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039#ifdef CONFIG_HOTPLUG_CPU
Gregory Haskins08f503b2008-03-10 17:59:11 -04005040 case CPU_DYING:
Peter Zijlstra317f3942011-04-05 17:23:58 +02005041 sched_ttwu_pending();
Gregory Haskins57d885f2008-01-25 21:08:18 +01005042 /* Update our root-domain */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005043 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005044 if (rq->rd) {
Rusty Russellc6c49272008-11-25 02:35:05 +10305045 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005046 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005047 }
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005048 migrate_tasks(cpu);
5049 BUG_ON(rq->nr_running != 1); /* the migration thread */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005050 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra48c5cca2010-11-13 19:32:29 +01005051
Peter Zijlstraf319da02012-08-20 11:26:57 +02005052 calc_load_migrate(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005053 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054#endif
5055 }
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005056
5057 update_max_interval();
5058
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059 return NOTIFY_OK;
5060}
5061
Paul Mackerrasf38b0822009-06-02 21:05:16 +10005062/*
5063 * Register at high priority so that task migration (migrate_tasks)
5064 * happens before everything else. This has to be lower priority than
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005065 * the notifier in the perf_event subsystem, though.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 */
Chandra Seetharaman26c21432006-06-27 02:54:10 -07005067static struct notifier_block __cpuinitdata migration_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 .notifier_call = migration_call,
Tejun Heo50a323b2010-06-08 21:40:36 +02005069 .priority = CPU_PRI_MIGRATION,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070};
5071
Tejun Heo3a101d02010-06-08 21:40:36 +02005072static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5073 unsigned long action, void *hcpu)
5074{
5075 switch (action & ~CPU_TASKS_FROZEN) {
Peter Zijlstra5fbd0362011-12-15 17:09:22 +01005076 case CPU_STARTING:
Tejun Heo3a101d02010-06-08 21:40:36 +02005077 case CPU_DOWN_FAILED:
5078 set_cpu_active((long)hcpu, true);
5079 return NOTIFY_OK;
5080 default:
5081 return NOTIFY_DONE;
5082 }
5083}
5084
5085static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5086 unsigned long action, void *hcpu)
5087{
5088 switch (action & ~CPU_TASKS_FROZEN) {
5089 case CPU_DOWN_PREPARE:
5090 set_cpu_active((long)hcpu, false);
5091 return NOTIFY_OK;
5092 default:
5093 return NOTIFY_DONE;
5094 }
5095}
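/*
 * A short worked example of the "action & ~CPU_TASKS_FROZEN" masking
 * used by the notifiers above: suspend/resume delivers _FROZEN
 * variants of the hotplug events, which are just the plain event with
 * the CPU_TASKS_FROZEN bit ORed in:
 *
 *   CPU_ONLINE_FROZEN == (CPU_ONLINE | CPU_TASKS_FROZEN)
 *
 * Masking that bit off lets a single switch statement serve both the
 * runtime hotplug path and the suspend/resume path.
 */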
5096
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07005097static int __init migration_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098{
5099 void *cpu = (void *)(long)smp_processor_id();
Akinobu Mita07dccf32006-09-29 02:00:22 -07005100 int err;
Ingo Molnar48f24c42006-07-03 00:25:40 -07005101
Tejun Heo3a101d02010-06-08 21:40:36 +02005102 /* Initialize migration for the boot CPU */
Akinobu Mita07dccf32006-09-29 02:00:22 -07005103 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5104 BUG_ON(err == NOTIFY_BAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5106 register_cpu_notifier(&migration_notifier);
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07005107
Tejun Heo3a101d02010-06-08 21:40:36 +02005108 /* Register cpu active notifiers */
5109 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5110 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5111
Thomas Gleixnera004cd42009-07-21 09:54:05 +02005112 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113}
Eduard - Gabriel Munteanu7babe8d2008-07-25 19:45:11 -07005114early_initcall(migration_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115#endif
5116
5117#ifdef CONFIG_SMP
Christoph Lameter476f3532007-05-06 14:48:58 -07005118
Peter Zijlstra4cb98832011-04-07 14:09:58 +02005119static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5120
Ingo Molnar3e9830d2007-10-15 17:00:13 +02005121#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005122
Peter Zijlstrad039ac62012-05-31 21:20:16 +02005123static __read_mostly int sched_debug_enabled;
Mike Travisf6630112009-11-17 18:22:15 -06005124
Peter Zijlstrad039ac62012-05-31 21:20:16 +02005125static int __init sched_debug_setup(char *str)
Mike Travisf6630112009-11-17 18:22:15 -06005126{
Peter Zijlstrad039ac62012-05-31 21:20:16 +02005127 sched_debug_enabled = 1;
Mike Travisf6630112009-11-17 18:22:15 -06005128
5129 return 0;
5130}
Peter Zijlstrad039ac62012-05-31 21:20:16 +02005131early_param("sched_debug", sched_debug_setup);
5132
5133static inline bool sched_debug(void)
5134{
5135 return sched_debug_enabled;
5136}
Mike Travisf6630112009-11-17 18:22:15 -06005137
Mike Travis7c16ec52008-04-04 18:11:11 -07005138static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
Rusty Russell96f874e2008-11-25 02:35:14 +10305139 struct cpumask *groupmask)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005140{
5141 struct sched_group *group = sd->groups;
Mike Travis434d53b2008-04-04 18:11:04 -07005142 char str[256];
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005143
Rusty Russell968ea6d2008-12-13 21:55:51 +10305144 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
Rusty Russell96f874e2008-11-25 02:35:14 +10305145 cpumask_clear(groupmask);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005146
5147 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5148
5149 if (!(sd->flags & SD_LOAD_BALANCE)) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005150 printk("does not load-balance\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005151 if (sd->parent)
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005152 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5153 " has parent");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005154 return -1;
5155 }
5156
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005157 printk(KERN_CONT "span %s level %s\n", str, sd->name);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005158
Rusty Russell758b2cd2008-11-25 02:35:04 +10305159 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005160 printk(KERN_ERR "ERROR: domain->span does not contain "
5161 "CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005162 }
Rusty Russell758b2cd2008-11-25 02:35:04 +10305163 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005164 printk(KERN_ERR "ERROR: domain->groups does not contain"
5165 " CPU%d\n", cpu);
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005166 }
5167
5168 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5169 do {
5170 if (!group) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005171 printk("\n");
5172 printk(KERN_ERR "ERROR: group is NULL\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005173 break;
5174 }
5175
Peter Zijlstrac3decf02012-05-31 12:05:32 +02005176 /*
5177 * Even though we initialize ->power to something semi-sane,
5178 * we leave power_orig unset. This allows us to detect if
5179 * domain iteration is still funny without causing /0 traps.
5180 */
5181 if (!group->sgp->power_orig) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005182 printk(KERN_CONT "\n");
5183 printk(KERN_ERR "ERROR: domain->cpu_power not "
5184 "set\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005185 break;
5186 }
5187
Rusty Russell758b2cd2008-11-25 02:35:04 +10305188 if (!cpumask_weight(sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005189 printk(KERN_CONT "\n");
5190 printk(KERN_ERR "ERROR: empty group\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005191 break;
5192 }
5193
Peter Zijlstracb83b622012-04-17 15:49:36 +02005194 if (!(sd->flags & SD_OVERLAP) &&
5195 cpumask_intersects(groupmask, sched_group_cpus(group))) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005196 printk(KERN_CONT "\n");
5197 printk(KERN_ERR "ERROR: repeated CPUs\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005198 break;
5199 }
5200
Rusty Russell758b2cd2008-11-25 02:35:04 +10305201 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005202
Rusty Russell968ea6d2008-12-13 21:55:51 +10305203 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
Gautham R Shenoy381512c2009-04-14 09:09:36 +05305204
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005205 printk(KERN_CONT " %s", str);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005206 if (group->sgp->power != SCHED_POWER_SCALE) {
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005207 printk(KERN_CONT " (cpu_power = %d)",
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005208 group->sgp->power);
Gautham R Shenoy381512c2009-04-14 09:09:36 +05305209 }
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005210
5211 group = group->next;
5212 } while (group != sd->groups);
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005213 printk(KERN_CONT "\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005214
Rusty Russell758b2cd2008-11-25 02:35:04 +10305215 if (!cpumask_equal(sched_domain_span(sd), groupmask))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005216 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005217
Rusty Russell758b2cd2008-11-25 02:35:04 +10305218 if (sd->parent &&
5219 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01005220 printk(KERN_ERR "ERROR: parent span is not a superset "
5221 "of domain->span\n");
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005222 return 0;
5223}
5224
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225static void sched_domain_debug(struct sched_domain *sd, int cpu)
5226{
5227 int level = 0;
5228
Peter Zijlstrad039ac62012-05-31 21:20:16 +02005229 if (!sched_debug_enabled)
Mike Travisf6630112009-11-17 18:22:15 -06005230 return;
5231
Nick Piggin41c7ce92005-06-25 14:57:24 -07005232 if (!sd) {
5233 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5234 return;
5235 }
5236
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5238
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005239 for (;;) {
Peter Zijlstra4cb98832011-04-07 14:09:58 +02005240 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005241 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 level++;
5243 sd = sd->parent;
Miguel Ojeda Sandonis33859f72006-12-10 02:20:38 -08005244 if (!sd)
Ingo Molnar4dcf6af2007-10-24 18:23:48 +02005245 break;
5246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02005248#else /* !CONFIG_SCHED_DEBUG */
Ingo Molnar48f24c42006-07-03 00:25:40 -07005249# define sched_domain_debug(sd, cpu) do { } while (0)
Peter Zijlstrad039ac62012-05-31 21:20:16 +02005250static inline bool sched_debug(void)
5251{
5252 return false;
5253}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02005254#endif /* CONFIG_SCHED_DEBUG */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07005256static int sd_degenerate(struct sched_domain *sd)
Suresh Siddha245af2c2005-06-25 14:57:25 -07005257{
Rusty Russell758b2cd2008-11-25 02:35:04 +10305258 if (cpumask_weight(sched_domain_span(sd)) == 1)
Suresh Siddha245af2c2005-06-25 14:57:25 -07005259 return 1;
5260
5261 /* Following flags need at least 2 groups */
5262 if (sd->flags & (SD_LOAD_BALANCE |
5263 SD_BALANCE_NEWIDLE |
5264 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005265 SD_BALANCE_EXEC |
5266 SD_SHARE_CPUPOWER |
5267 SD_SHARE_PKG_RESOURCES)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07005268 if (sd->groups != sd->groups->next)
5269 return 0;
5270 }
5271
5272 /* Following flags don't use groups */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005273 if (sd->flags & (SD_WAKE_AFFINE))
Suresh Siddha245af2c2005-06-25 14:57:25 -07005274 return 0;
5275
5276 return 1;
5277}
5278
Ingo Molnar48f24c42006-07-03 00:25:40 -07005279static int
5280sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
Suresh Siddha245af2c2005-06-25 14:57:25 -07005281{
5282 unsigned long cflags = sd->flags, pflags = parent->flags;
5283
5284 if (sd_degenerate(parent))
5285 return 1;
5286
Rusty Russell758b2cd2008-11-25 02:35:04 +10305287 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
Suresh Siddha245af2c2005-06-25 14:57:25 -07005288 return 0;
5289
Suresh Siddha245af2c2005-06-25 14:57:25 -07005290 /* Flags needing groups don't count if only 1 group in parent */
5291 if (parent->groups == parent->groups->next) {
5292 pflags &= ~(SD_LOAD_BALANCE |
5293 SD_BALANCE_NEWIDLE |
5294 SD_BALANCE_FORK |
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005295 SD_BALANCE_EXEC |
5296 SD_SHARE_CPUPOWER |
5297 SD_SHARE_PKG_RESOURCES);
Ken Chen54364992008-12-07 18:47:37 -08005298 if (nr_node_ids == 1)
5299 pflags &= ~SD_SERIALIZE;
Suresh Siddha245af2c2005-06-25 14:57:25 -07005300 }
5301 if (~cflags & pflags)
5302 return 0;
5303
5304 return 1;
5305}
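/*
 * A sketch of when the checks above fire: if a domain ends up with a
 * single group spanning the same cpus as its child, and it carries no
 * flags that need more than one group, it is redundant and
 * cpu_attach_domain() below splices it out:
 *
 *   before:  cpu -> MC (span 0-3) -> CPU (span 0-3, one group)
 *   after:   cpu -> MC (span 0-3)
 *
 * The domain names and spans here are illustrative only.
 */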
5306
Peter Zijlstradce840a2011-04-07 14:09:50 +02005307static void free_rootdomain(struct rcu_head *rcu)
Rusty Russellc6c49272008-11-25 02:35:05 +10305308{
Peter Zijlstradce840a2011-04-07 14:09:50 +02005309 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
Peter Zijlstra047106a2009-11-16 10:28:09 +01005310
Rusty Russell68e74562008-11-25 02:35:13 +10305311 cpupri_cleanup(&rd->cpupri);
Rusty Russellc6c49272008-11-25 02:35:05 +10305312 free_cpumask_var(rd->rto_mask);
5313 free_cpumask_var(rd->online);
5314 free_cpumask_var(rd->span);
5315 kfree(rd);
5316}
5317
Gregory Haskins57d885f2008-01-25 21:08:18 +01005318static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5319{
Ingo Molnara0490fa2009-02-12 11:35:40 +01005320 struct root_domain *old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005321 unsigned long flags;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005322
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005323 raw_spin_lock_irqsave(&rq->lock, flags);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005324
5325 if (rq->rd) {
Ingo Molnara0490fa2009-02-12 11:35:40 +01005326 old_rd = rq->rd;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005327
Rusty Russellc6c49272008-11-25 02:35:05 +10305328 if (cpumask_test_cpu(rq->cpu, old_rd->online))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005329 set_rq_offline(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005330
Rusty Russellc6c49272008-11-25 02:35:05 +10305331 cpumask_clear_cpu(rq->cpu, old_rd->span);
Gregory Haskinsdc938522008-01-25 21:08:26 +01005332
Ingo Molnara0490fa2009-02-12 11:35:40 +01005333 /*
5334	 * If we don't want to free the old_rd yet, then
5335 * set old_rd to NULL to skip the freeing later
5336 * in this function:
5337 */
5338 if (!atomic_dec_and_test(&old_rd->refcount))
5339 old_rd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005340 }
5341
5342 atomic_inc(&rd->refcount);
5343 rq->rd = rd;
5344
Rusty Russellc6c49272008-11-25 02:35:05 +10305345 cpumask_set_cpu(rq->cpu, rd->span);
Gregory Haskins00aec932009-07-30 10:57:23 -04005346 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
Gregory Haskins1f11eb62008-06-04 15:04:05 -04005347 set_rq_online(rq);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005348
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005349 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnara0490fa2009-02-12 11:35:40 +01005350
5351 if (old_rd)
Peter Zijlstradce840a2011-04-07 14:09:50 +02005352 call_rcu_sched(&old_rd->rcu, free_rootdomain);
Gregory Haskins57d885f2008-01-25 21:08:18 +01005353}
5354
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005355static int init_rootdomain(struct root_domain *rd)
Gregory Haskins57d885f2008-01-25 21:08:18 +01005356{
5357 memset(rd, 0, sizeof(*rd));
5358
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005359 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
Li Zefan0c910d22009-01-06 17:39:06 +08005360 goto out;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005361 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10305362 goto free_span;
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005363 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
Rusty Russellc6c49272008-11-25 02:35:05 +10305364 goto free_online;
Gregory Haskins6e0534f2008-05-12 21:21:01 +02005365
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005366 if (cpupri_init(&rd->cpupri) != 0)
Rusty Russell68e74562008-11-25 02:35:13 +10305367 goto free_rto_mask;
Rusty Russellc6c49272008-11-25 02:35:05 +10305368 return 0;
5369
Rusty Russell68e74562008-11-25 02:35:13 +10305370free_rto_mask:
5371 free_cpumask_var(rd->rto_mask);
Rusty Russellc6c49272008-11-25 02:35:05 +10305372free_online:
5373 free_cpumask_var(rd->online);
5374free_span:
5375 free_cpumask_var(rd->span);
Li Zefan0c910d22009-01-06 17:39:06 +08005376out:
Rusty Russellc6c49272008-11-25 02:35:05 +10305377 return -ENOMEM;
Gregory Haskins57d885f2008-01-25 21:08:18 +01005378}
5379
Peter Zijlstra029632f2011-10-25 10:00:11 +02005380/*
5381 * By default the system creates a single root-domain with all cpus as
5382 * members (mimicking the global state we have today).
5383 */
5384struct root_domain def_root_domain;
5385
Gregory Haskins57d885f2008-01-25 21:08:18 +01005386static void init_defrootdomain(void)
5387{
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005388 init_rootdomain(&def_root_domain);
Rusty Russellc6c49272008-11-25 02:35:05 +10305389
Gregory Haskins57d885f2008-01-25 21:08:18 +01005390 atomic_set(&def_root_domain.refcount, 1);
5391}
5392
Gregory Haskinsdc938522008-01-25 21:08:26 +01005393static struct root_domain *alloc_rootdomain(void)
Gregory Haskins57d885f2008-01-25 21:08:18 +01005394{
5395 struct root_domain *rd;
5396
5397 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5398 if (!rd)
5399 return NULL;
5400
Pekka Enberg68c38fc2010-07-15 23:18:22 +03005401 if (init_rootdomain(rd) != 0) {
Rusty Russellc6c49272008-11-25 02:35:05 +10305402 kfree(rd);
5403 return NULL;
5404 }
Gregory Haskins57d885f2008-01-25 21:08:18 +01005405
5406 return rd;
5407}
5408
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005409static void free_sched_groups(struct sched_group *sg, int free_sgp)
5410{
5411 struct sched_group *tmp, *first;
5412
5413 if (!sg)
5414 return;
5415
5416 first = sg;
5417 do {
5418 tmp = sg->next;
5419
5420 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5421 kfree(sg->sgp);
5422
5423 kfree(sg);
5424 sg = tmp;
5425 } while (sg != first);
5426}
5427
Peter Zijlstradce840a2011-04-07 14:09:50 +02005428static void free_sched_domain(struct rcu_head *rcu)
5429{
5430 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005431
5432 /*
5433	 * If it's an overlapping domain it has private groups; iterate and
5434 * nuke them all.
5435 */
5436 if (sd->flags & SD_OVERLAP) {
5437 free_sched_groups(sd->groups, 1);
5438 } else if (atomic_dec_and_test(&sd->groups->ref)) {
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005439 kfree(sd->groups->sgp);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005440 kfree(sd->groups);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005441 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005442 kfree(sd);
5443}
5444
5445static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5446{
5447 call_rcu(&sd->rcu, free_sched_domain);
5448}
5449
5450static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5451{
5452 for (; sd; sd = sd->parent)
5453 destroy_sched_domain(sd, cpu);
5454}
5455
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456/*
Peter Zijlstra518cd622011-12-07 15:07:31 +01005457 * Keep a special pointer to the highest sched_domain that has
5458 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
5459 * allows us to avoid some pointer chasing in select_idle_sibling().
5460 *
Mike Galbraith970e1782012-06-12 05:18:32 +02005461 * Iterate domains and sched_groups downward, assigning CPUs to be the
5462 * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing
5463 * due to random perturbation self-canceling, i.e. sw buddies pull
5464 * their counterpart to their CPU's hw counterpart.
5465 *
Peter Zijlstra518cd622011-12-07 15:07:31 +01005466 * Also keep a unique ID per domain (we use the first cpu number in
5467 * the cpumask of the domain), this allows us to quickly tell if
Peter Zijlstra39be3502012-01-26 12:44:34 +01005468 * two cpus are in the same cache domain, see cpus_share_cache().
Peter Zijlstra518cd622011-12-07 15:07:31 +01005469 */
5470DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5471DEFINE_PER_CPU(int, sd_llc_id);
5472
5473static void update_top_cache_domain(int cpu)
5474{
5475 struct sched_domain *sd;
5476 int id = cpu;
5477
5478 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
Mike Galbraith970e1782012-06-12 05:18:32 +02005479 if (sd) {
5480 struct sched_domain *tmp = sd;
5481 struct sched_group *sg, *prev;
5482 bool right;
5483
5484 /*
5485 * Traverse to first CPU in group, and count hops
5486 * to cpu from there, switching direction on each
5487 * hop, never ever pointing the last CPU rightward.
5488 */
5489 do {
5490 id = cpumask_first(sched_domain_span(tmp));
5491 prev = sg = tmp->groups;
5492 right = 1;
5493
5494 while (cpumask_first(sched_group_cpus(sg)) != id)
5495 sg = sg->next;
5496
5497 while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
5498 prev = sg;
5499 sg = sg->next;
5500 right = !right;
5501 }
5502
5503 /* A CPU went down, never point back to domain start. */
5504 if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
5505 right = false;
5506
5507 sg = right ? sg->next : prev;
5508 tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
5509 } while ((tmp = tmp->child));
5510
Peter Zijlstra518cd622011-12-07 15:07:31 +01005511 id = cpumask_first(sched_domain_span(sd));
Mike Galbraith970e1782012-06-12 05:18:32 +02005512 }
Peter Zijlstra518cd622011-12-07 15:07:31 +01005513
5514 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5515 per_cpu(sd_llc_id, cpu) = id;
5516}
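/*
 * A minimal sketch of how the values cached above are consumed; this
 * mirrors cpus_share_cache() rather than defining it, and the helper
 * name below is illustrative:
 *
 *   static inline bool share_llc(int this_cpu, int that_cpu)
 *   {
 *           return per_cpu(sd_llc_id, this_cpu) ==
 *                  per_cpu(sd_llc_id, that_cpu);
 *   }
 *
 * Since sd_llc_id is the first cpu of the highest
 * SD_SHARE_PKG_RESOURCES domain, equal ids mean a shared last-level
 * cache with no domain-tree walking at wakeup time.
 */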
5517
5518/*
Ingo Molnar0eab9142008-01-25 21:08:19 +01005519 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520 * hold the hotplug lock.
5521 */
Ingo Molnar0eab9142008-01-25 21:08:19 +01005522static void
5523cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524{
Ingo Molnar70b97a72006-07-03 00:25:42 -07005525 struct rq *rq = cpu_rq(cpu);
Suresh Siddha245af2c2005-06-25 14:57:25 -07005526 struct sched_domain *tmp;
5527
5528 /* Remove the sched domains which do not contribute to scheduling. */
Li Zefanf29c9b12008-11-06 09:45:16 +08005529 for (tmp = sd; tmp; ) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07005530 struct sched_domain *parent = tmp->parent;
5531 if (!parent)
5532 break;
Li Zefanf29c9b12008-11-06 09:45:16 +08005533
Siddha, Suresh B1a848872006-10-03 01:14:08 -07005534 if (sd_parent_degenerate(tmp, parent)) {
Suresh Siddha245af2c2005-06-25 14:57:25 -07005535 tmp->parent = parent->parent;
Siddha, Suresh B1a848872006-10-03 01:14:08 -07005536 if (parent->parent)
5537 parent->parent->child = tmp;
Peter Zijlstradce840a2011-04-07 14:09:50 +02005538 destroy_sched_domain(parent, cpu);
Li Zefanf29c9b12008-11-06 09:45:16 +08005539 } else
5540 tmp = tmp->parent;
Suresh Siddha245af2c2005-06-25 14:57:25 -07005541 }
5542
Siddha, Suresh B1a848872006-10-03 01:14:08 -07005543 if (sd && sd_degenerate(sd)) {
Peter Zijlstradce840a2011-04-07 14:09:50 +02005544 tmp = sd;
Suresh Siddha245af2c2005-06-25 14:57:25 -07005545 sd = sd->parent;
Peter Zijlstradce840a2011-04-07 14:09:50 +02005546 destroy_sched_domain(tmp, cpu);
Siddha, Suresh B1a848872006-10-03 01:14:08 -07005547 if (sd)
5548 sd->child = NULL;
5549 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005550
Peter Zijlstra4cb98832011-04-07 14:09:58 +02005551 sched_domain_debug(sd, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552
Gregory Haskins57d885f2008-01-25 21:08:18 +01005553 rq_attach_root(rq, rd);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005554 tmp = rq->sd;
Nick Piggin674311d2005-06-25 14:57:27 -07005555 rcu_assign_pointer(rq->sd, sd);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005556 destroy_sched_domains(tmp, cpu);
Peter Zijlstra518cd622011-12-07 15:07:31 +01005557
5558 update_top_cache_domain(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005559}
5560
5561/* cpus with isolated domains */
Rusty Russelldcc30a32008-11-25 02:35:12 +10305562static cpumask_var_t cpu_isolated_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563
5564/* Setup the mask of cpus configured for isolated domains */
5565static int __init isolated_cpu_setup(char *str)
5566{
Rusty Russellbdddd292009-12-02 14:09:16 +10305567 alloc_bootmem_cpumask_var(&cpu_isolated_map);
Rusty Russell968ea6d2008-12-13 21:55:51 +10305568 cpulist_parse(str, cpu_isolated_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569 return 1;
5570}
5571
Ingo Molnar8927f492007-10-15 17:00:13 +02005572__setup("isolcpus=", isolated_cpu_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573
Peter Zijlstrad3081f52011-04-07 14:09:59 +02005574static const struct cpumask *cpu_cpu_mask(int cpu)
5575{
5576 return cpumask_of_node(cpu_to_node(cpu));
5577}
5578
Peter Zijlstradce840a2011-04-07 14:09:50 +02005579struct sd_data {
5580 struct sched_domain **__percpu sd;
5581 struct sched_group **__percpu sg;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005582 struct sched_group_power **__percpu sgp;
Peter Zijlstradce840a2011-04-07 14:09:50 +02005583};
5584
Andreas Herrmann49a02c52009-08-18 12:51:52 +02005585struct s_data {
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02005586 struct sched_domain ** __percpu sd;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02005587 struct root_domain *rd;
5588};
5589
Andreas Herrmann2109b992009-08-18 12:53:00 +02005590enum s_alloc {
Andreas Herrmann2109b992009-08-18 12:53:00 +02005591 sa_rootdomain,
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02005592 sa_sd,
Peter Zijlstradce840a2011-04-07 14:09:50 +02005593 sa_sd_storage,
Andreas Herrmann2109b992009-08-18 12:53:00 +02005594 sa_none,
5595};
5596
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005597struct sched_domain_topology_level;
5598
5599typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02005600typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5601
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005602#define SDTL_OVERLAP 0x01
5603
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02005604struct sched_domain_topology_level {
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02005605 sched_domain_init_f init;
5606 sched_domain_mask_f mask;
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005607 int flags;
Peter Zijlstracb83b622012-04-17 15:49:36 +02005608 int numa_level;
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005609 struct sd_data data;
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02005610};
5611
Peter Zijlstrac1174872012-05-31 14:47:33 +02005612/*
5613 * Build an iteration mask that can exclude certain CPUs from the upwards
5614 * domain traversal.
5615 *
5616 * Asymmetric node setups can result in situations where the domain tree is of
5617 * unequal depth; make sure to skip domains that already cover the entire
5618 * range.
5619 *
5620 * In that case build_sched_domains() will have terminated the iteration early
5621 * and our sibling sd spans will be empty. Domains should always include the
5622 * cpu they're built on, so check that.
5623 *
5624 */
5625static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5626{
5627 const struct cpumask *span = sched_domain_span(sd);
5628 struct sd_data *sdd = sd->private;
5629 struct sched_domain *sibling;
5630 int i;
5631
5632 for_each_cpu(i, span) {
5633 sibling = *per_cpu_ptr(sdd->sd, i);
5634 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5635 continue;
5636
5637 cpumask_set_cpu(i, sched_group_mask(sg));
5638 }
5639}
5640
5641/*
5642 * Return the canonical balance cpu for this group, this is the first cpu
5643 * of this group that's also in the iteration mask.
5644 */
5645int group_balance_cpu(struct sched_group *sg)
5646{
5647 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5648}
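/*
 * Example (numbers illustrative): if a group spans cpus 0-3 but only
 * cpus 2-3 made it into the group's iteration mask on an asymmetric
 * setup, group_balance_cpu() returns 2, and balancing for the group
 * is driven from cpu 2 rather than from cpu 0.
 */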
5649
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005650static int
5651build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5652{
5653 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5654 const struct cpumask *span = sched_domain_span(sd);
5655 struct cpumask *covered = sched_domains_tmpmask;
5656 struct sd_data *sdd = sd->private;
5657 struct sched_domain *child;
5658 int i;
5659
5660 cpumask_clear(covered);
5661
5662 for_each_cpu(i, span) {
5663 struct cpumask *sg_span;
5664
5665 if (cpumask_test_cpu(i, covered))
5666 continue;
5667
Peter Zijlstrac1174872012-05-31 14:47:33 +02005668 child = *per_cpu_ptr(sdd->sd, i);
5669
5670 /* See the comment near build_group_mask(). */
5671 if (!cpumask_test_cpu(i, sched_domain_span(child)))
5672 continue;
5673
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005674 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
Suresh Siddha4d78a222011-11-18 15:03:29 -08005675 GFP_KERNEL, cpu_to_node(cpu));
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005676
5677 if (!sg)
5678 goto fail;
5679
5680 sg_span = sched_group_cpus(sg);
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005681 if (child->child) {
5682 child = child->child;
5683 cpumask_copy(sg_span, sched_domain_span(child));
5684 } else
5685 cpumask_set_cpu(i, sg_span);
5686
5687 cpumask_or(covered, covered, sg_span);
5688
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005689 sg->sgp = *per_cpu_ptr(sdd->sgp, i);
Peter Zijlstrac1174872012-05-31 14:47:33 +02005690 if (atomic_inc_return(&sg->sgp->ref) == 1)
5691 build_group_mask(sd, sg);
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005692
Peter Zijlstrac3decf02012-05-31 12:05:32 +02005693 /*
5694 * Initialize sgp->power such that even if we mess up the
5695 * domains and no possible iteration will get us here, we won't
5696 * die on a /0 trap.
5697 */
5698 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
Peter Zijlstrac1174872012-05-31 14:47:33 +02005699
5700 /*
5701 * Make sure the first group of this domain contains the
5702 * canonical balance cpu. Otherwise the sched_domain iteration
5703 * breaks. See update_sg_lb_stats().
5704 */
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005705 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
Peter Zijlstrac1174872012-05-31 14:47:33 +02005706 group_balance_cpu(sg) == cpu)
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005707 groups = sg;
5708
5709 if (!first)
5710 first = sg;
5711 if (last)
5712 last->next = sg;
5713 last = sg;
5714 last->next = first;
5715 }
5716 sd->groups = groups;
5717
5718 return 0;
5719
5720fail:
5721 free_sched_groups(first, 0);
5722
5723 return -ENOMEM;
5724}
5725
Peter Zijlstradce840a2011-04-07 14:09:50 +02005726static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005727{
Peter Zijlstradce840a2011-04-07 14:09:50 +02005728 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5729 struct sched_domain *child = sd->child;
5730
5731 if (child)
5732 cpu = cpumask_first(sched_domain_span(child));
5733
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005734 if (sg) {
Peter Zijlstradce840a2011-04-07 14:09:50 +02005735 *sg = *per_cpu_ptr(sdd->sg, cpu);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005736 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005737 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005738 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005739
Linus Torvalds1da177e2005-04-16 15:20:36 -07005740 return cpu;
5741}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005742
Ingo Molnar48f24c42006-07-03 00:25:40 -07005743/*
Peter Zijlstradce840a2011-04-07 14:09:50 +02005744 * build_sched_groups will build a circular linked list of the groups
5745 * covered by the given span, set each group's ->cpumask correctly,
5746 * and initialize each group's ->cpu_power to 0.
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005747 *
5748 * Assumes the sched_domain tree is fully constructed
Ingo Molnar48f24c42006-07-03 00:25:40 -07005749 */
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005750static int
5751build_sched_groups(struct sched_domain *sd, int cpu)
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08005752{
Peter Zijlstradce840a2011-04-07 14:09:50 +02005753 struct sched_group *first = NULL, *last = NULL;
5754 struct sd_data *sdd = sd->private;
5755 const struct cpumask *span = sched_domain_span(sd);
Peter Zijlstraf96225f2011-04-07 14:09:57 +02005756 struct cpumask *covered;
Peter Zijlstradce840a2011-04-07 14:09:50 +02005757 int i;
5758
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005759 get_group(cpu, sdd, &sd->groups);
5760 atomic_inc(&sd->groups->ref);
5761
5762 if (cpu != cpumask_first(sched_domain_span(sd)))
5763 return 0;
5764
Peter Zijlstraf96225f2011-04-07 14:09:57 +02005765 lockdep_assert_held(&sched_domains_mutex);
5766 covered = sched_domains_tmpmask;
5767
Peter Zijlstradce840a2011-04-07 14:09:50 +02005768 cpumask_clear(covered);
5769
5770 for_each_cpu(i, span) {
5771 struct sched_group *sg;
5772 int group = get_group(i, sdd, &sg);
5773 int j;
5774
5775 if (cpumask_test_cpu(i, covered))
5776 continue;
5777
5778 cpumask_clear(sched_group_cpus(sg));
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005779 sg->sgp->power = 0;
Peter Zijlstrac1174872012-05-31 14:47:33 +02005780 cpumask_setall(sched_group_mask(sg));
Peter Zijlstradce840a2011-04-07 14:09:50 +02005781
5782 for_each_cpu(j, span) {
5783 if (get_group(j, sdd, NULL) != group)
5784 continue;
5785
5786 cpumask_set_cpu(j, covered);
5787 cpumask_set_cpu(j, sched_group_cpus(sg));
5788 }
5789
5790 if (!first)
5791 first = sg;
5792 if (last)
5793 last->next = sg;
5794 last = sg;
5795 }
5796 last->next = first;
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005797
5798 return 0;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -08005799}
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07005800
Linus Torvalds1da177e2005-04-16 15:20:36 -07005801/*
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005802 * Initialize sched groups cpu_power.
5803 *
5804 * cpu_power indicates the capacity of a sched group, which is used while
5805 * distributing the load between different sched groups in a sched domain.
5806 * Typically cpu_power for all the groups in a sched domain will be the same
5807 * unless there are asymmetries in the topology. If there are asymmetries, a
5808 * group having more cpu_power will pick up more load than a group having
5809 * less cpu_power.
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005810 */
5811static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5812{
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005813 struct sched_group *sg = sd->groups;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005814
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005815 WARN_ON(!sd || !sg);
5816
5817 do {
5818 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
5819 sg = sg->next;
5820 } while (sg != sd->groups);
5821
Peter Zijlstrac1174872012-05-31 14:47:33 +02005822 if (cpu != group_balance_cpu(sg))
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005823 return;
5824
Peter Zijlstrad274cb32011-04-07 14:09:43 +02005825 update_group_power(sd, cpu);
Suresh Siddha69e1e812011-12-01 17:07:33 -08005826 atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005827}
5828
Peter Zijlstra029632f2011-10-25 10:00:11 +02005829int __weak arch_sd_sibling_asym_packing(void)
5830{
5831 return 0*SD_ASYM_PACKING;
Siddha, Suresh B89c47102006-10-03 01:14:09 -07005832}
5833
5834/*
Mike Travis7c16ec52008-04-04 18:11:11 -07005835 * Initializers for schedule domains
5836 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5837 */
5838
Ingo Molnara5d8c342008-10-09 11:35:51 +02005839#ifdef CONFIG_SCHED_DEBUG
5840# define SD_INIT_NAME(sd, type) sd->name = #type
5841#else
5842# define SD_INIT_NAME(sd, type) do { } while (0)
5843#endif
5844
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005845#define SD_INIT_FUNC(type) \
5846static noinline struct sched_domain * \
5847sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
5848{ \
5849 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
5850 *sd = SD_##type##_INIT; \
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005851 SD_INIT_NAME(sd, type); \
5852 sd->private = &tl->data; \
5853 return sd; \
Mike Travis7c16ec52008-04-04 18:11:11 -07005854}
5855
5856SD_INIT_FUNC(CPU)
Mike Travis7c16ec52008-04-04 18:11:11 -07005857#ifdef CONFIG_SCHED_SMT
5858 SD_INIT_FUNC(SIBLING)
5859#endif
5860#ifdef CONFIG_SCHED_MC
5861 SD_INIT_FUNC(MC)
5862#endif
Heiko Carstens01a08542010-08-31 10:28:16 +02005863#ifdef CONFIG_SCHED_BOOK
5864 SD_INIT_FUNC(BOOK)
5865#endif
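/*
 * For reference, a sketch of what one instantiation above expands to
 * (with CONFIG_SCHED_DEBUG set so SD_INIT_NAME is live):
 *
 *   static noinline struct sched_domain *
 *   sd_init_CPU(struct sched_domain_topology_level *tl, int cpu)
 *   {
 *           struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
 *           *sd = SD_CPU_INIT;
 *           sd->name = "CPU";
 *           sd->private = &tl->data;
 *           return sd;
 *   }
 */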
Mike Travis7c16ec52008-04-04 18:11:11 -07005866
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09005867static int default_relax_domain_level = -1;
Peter Zijlstra60495e72011-04-07 14:10:04 +02005868int sched_domain_level_max;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09005869
5870static int __init setup_relax_domain_level(char *str)
5871{
Dimitri Sivanicha841f8c2012-06-05 13:44:36 -05005872 if (kstrtoint(str, 0, &default_relax_domain_level))
5873 pr_warn("Unable to set relax_domain_level\n");
Li Zefan30e0e172008-05-13 10:27:17 +08005874
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09005875 return 1;
5876}
5877__setup("relax_domain_level=", setup_relax_domain_level);
5878
5879static void set_domain_attribute(struct sched_domain *sd,
5880 struct sched_domain_attr *attr)
5881{
5882 int request;
5883
5884 if (!attr || attr->relax_domain_level < 0) {
5885 if (default_relax_domain_level < 0)
5886 return;
5887 else
5888 request = default_relax_domain_level;
5889 } else
5890 request = attr->relax_domain_level;
5891 if (request < sd->level) {
5892 /* turn off idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005893 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09005894 } else {
5895 /* turn on idle balance on this domain */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005896 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09005897 }
5898}
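/*
 * Worked example of the level check above, assuming a topology with
 * SMT at level 0, MC at level 1 and CPU at level 2, and a boot line of
 * "relax_domain_level=1" (so request == 1):
 *
 *   SMT (level 0): 1 < 0 false -> wake/newidle balancing kept on
 *   MC  (level 1): 1 < 1 false -> wake/newidle balancing kept on
 *   CPU (level 2): 1 < 2 true  -> wake/newidle balancing turned off
 *
 * i.e. idle/wake balancing is only relaxed above the requested level.
 */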
5899
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005900static void __sdt_free(const struct cpumask *cpu_map);
5901static int __sdt_alloc(const struct cpumask *cpu_map);
5902
Andreas Herrmann2109b992009-08-18 12:53:00 +02005903static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
5904 const struct cpumask *cpu_map)
5905{
5906 switch (what) {
Andreas Herrmann2109b992009-08-18 12:53:00 +02005907 case sa_rootdomain:
Peter Zijlstra822ff792011-04-07 14:09:51 +02005908 if (!atomic_read(&d->rd->refcount))
5909 free_rootdomain(&d->rd->rcu); /* fall through */
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02005910 case sa_sd:
5911 free_percpu(d->sd); /* fall through */
Peter Zijlstradce840a2011-04-07 14:09:50 +02005912 case sa_sd_storage:
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005913 __sdt_free(cpu_map); /* fall through */
Andreas Herrmann2109b992009-08-18 12:53:00 +02005914 case sa_none:
5915 break;
5916 }
5917}
5918
5919static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
5920 const struct cpumask *cpu_map)
5921{
Peter Zijlstradce840a2011-04-07 14:09:50 +02005922 memset(d, 0, sizeof(*d));
5923
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02005924 if (__sdt_alloc(cpu_map))
5925 return sa_sd_storage;
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02005926 d->sd = alloc_percpu(struct sched_domain *);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005927 if (!d->sd)
5928 return sa_sd_storage;
Andreas Herrmann2109b992009-08-18 12:53:00 +02005929 d->rd = alloc_rootdomain();
Peter Zijlstradce840a2011-04-07 14:09:50 +02005930 if (!d->rd)
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02005931 return sa_sd;
Andreas Herrmann2109b992009-08-18 12:53:00 +02005932 return sa_rootdomain;
5933}
5934
Peter Zijlstradce840a2011-04-07 14:09:50 +02005935/*
5936 * NULL the sd_data elements we've used to build the sched_domain and
5937 * sched_group structure so that the subsequent __free_domain_allocs()
5938 * will not free the data we're using.
5939 */
5940static void claim_allocations(int cpu, struct sched_domain *sd)
5941{
5942 struct sd_data *sdd = sd->private;
Peter Zijlstradce840a2011-04-07 14:09:50 +02005943
5944 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
5945 *per_cpu_ptr(sdd->sd, cpu) = NULL;
5946
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005947 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
Peter Zijlstradce840a2011-04-07 14:09:50 +02005948 *per_cpu_ptr(sdd->sg, cpu) = NULL;
Peter Zijlstrae3589f62011-07-15 10:35:52 +02005949
5950 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005951 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
Peter Zijlstradce840a2011-04-07 14:09:50 +02005952}
5953
Andreas Herrmannd8173532009-08-18 12:57:03 +02005954#ifdef CONFIG_SCHED_SMT
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02005955static const struct cpumask *cpu_smt_mask(int cpu)
5956{
5957 return topology_thread_cpumask(cpu);
Andreas Herrmannd8173532009-08-18 12:57:03 +02005958}
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02005959#endif
Andreas Herrmannd8173532009-08-18 12:57:03 +02005960
Peter Zijlstrad069b912011-04-07 14:10:02 +02005961/*
5962 * Topology list, bottom-up.
5963 */
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02005964static struct sched_domain_topology_level default_topology[] = {
Peter Zijlstrad069b912011-04-07 14:10:02 +02005965#ifdef CONFIG_SCHED_SMT
5966 { sd_init_SIBLING, cpu_smt_mask, },
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02005967#endif
5968#ifdef CONFIG_SCHED_MC
5969 { sd_init_MC, cpu_coregroup_mask, },
5970#endif
Peter Zijlstrad069b912011-04-07 14:10:02 +02005971#ifdef CONFIG_SCHED_BOOK
5972 { sd_init_BOOK, cpu_book_mask, },
5973#endif
5974 { sd_init_CPU, cpu_cpu_mask, },
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02005975 { NULL, },
5976};
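/*
 * Illustrative note: with all three options enabled, domains are built
 * bottom-up per cpu as SIBLING (SMT threads) -> MC (cores sharing a
 * cache) -> BOOK -> CPU (the node-level span from cpu_cpu_mask()),
 * with each level's mask function selecting the cpus that level spans.
 */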
5977
5978static struct sched_domain_topology_level *sched_domain_topology = default_topology;
5979
Peter Zijlstracb83b622012-04-17 15:49:36 +02005980#ifdef CONFIG_NUMA
5981
5982static int sched_domains_numa_levels;
Peter Zijlstracb83b622012-04-17 15:49:36 +02005983static int *sched_domains_numa_distance;
5984static struct cpumask ***sched_domains_numa_masks;
5985static int sched_domains_curr_level;
5986
Peter Zijlstracb83b622012-04-17 15:49:36 +02005987static inline int sd_local_flags(int level)
5988{
Alex Shi10717dc2012-06-06 14:52:51 +08005989 if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
Peter Zijlstracb83b622012-04-17 15:49:36 +02005990 return 0;
5991
5992 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
5993}
5994
5995static struct sched_domain *
5996sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
5997{
5998 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
5999 int level = tl->numa_level;
6000 int sd_weight = cpumask_weight(
6001 sched_domains_numa_masks[level][cpu_to_node(cpu)]);
6002
6003 *sd = (struct sched_domain){
6004 .min_interval = sd_weight,
6005 .max_interval = 2*sd_weight,
6006 .busy_factor = 32,
Peter Zijlstra870a0bb2012-05-11 00:26:27 +02006007 .imbalance_pct = 125,
Peter Zijlstracb83b622012-04-17 15:49:36 +02006008 .cache_nice_tries = 2,
6009 .busy_idx = 3,
6010 .idle_idx = 2,
6011 .newidle_idx = 0,
6012 .wake_idx = 0,
6013 .forkexec_idx = 0,
6014
6015 .flags = 1*SD_LOAD_BALANCE
6016 | 1*SD_BALANCE_NEWIDLE
6017 | 0*SD_BALANCE_EXEC
6018 | 0*SD_BALANCE_FORK
6019 | 0*SD_BALANCE_WAKE
6020 | 0*SD_WAKE_AFFINE
Peter Zijlstracb83b622012-04-17 15:49:36 +02006021 | 0*SD_SHARE_CPUPOWER
Peter Zijlstracb83b622012-04-17 15:49:36 +02006022 | 0*SD_SHARE_PKG_RESOURCES
6023 | 1*SD_SERIALIZE
6024 | 0*SD_PREFER_SIBLING
6025 | sd_local_flags(level)
6026 ,
6027 .last_balance = jiffies,
6028 .balance_interval = sd_weight,
6029 };
6030 SD_INIT_NAME(sd, NUMA);
6031 sd->private = &tl->data;
6032
6033 /*
6034 * Ugly hack to pass state to sd_numa_mask()...
6035 */
6036 sched_domains_curr_level = tl->numa_level;
6037
6038 return sd;
6039}
6040
6041static const struct cpumask *sd_numa_mask(int cpu)
6042{
6043 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6044}
6045
Peter Zijlstrad039ac62012-05-31 21:20:16 +02006046static void sched_numa_warn(const char *str)
6047{
6048 static int done = false;
6049	int i, j;
6050
6051 if (done)
6052 return;
6053
6054 done = true;
6055
6056 printk(KERN_WARNING "ERROR: %s\n\n", str);
6057
6058 for (i = 0; i < nr_node_ids; i++) {
6059 printk(KERN_WARNING " ");
6060 for (j = 0; j < nr_node_ids; j++)
6061 printk(KERN_CONT "%02d ", node_distance(i,j));
6062 printk(KERN_CONT "\n");
6063 }
6064 printk(KERN_WARNING "\n");
6065}
6066
6067static bool find_numa_distance(int distance)
6068{
6069 int i;
6070
6071 if (distance == node_distance(0, 0))
6072 return true;
6073
6074 for (i = 0; i < sched_domains_numa_levels; i++) {
6075 if (sched_domains_numa_distance[i] == distance)
6076 return true;
6077 }
6078
6079 return false;
6080}
6081
Peter Zijlstracb83b622012-04-17 15:49:36 +02006082static void sched_init_numa(void)
6083{
6084 int next_distance, curr_distance = node_distance(0, 0);
6085 struct sched_domain_topology_level *tl;
6086 int level = 0;
6087 int i, j, k;
6088
Peter Zijlstracb83b622012-04-17 15:49:36 +02006089 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6090 if (!sched_domains_numa_distance)
6091 return;
6092
6093 /*
6094 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
6095 * unique distances in the node_distance() table.
6096 *
6097 * Assumes node_distance(0,j) includes all distances in
6098 * node_distance(i,j) in order to avoid cubic time.
Peter Zijlstracb83b622012-04-17 15:49:36 +02006099 */
6100 next_distance = curr_distance;
6101 for (i = 0; i < nr_node_ids; i++) {
6102 for (j = 0; j < nr_node_ids; j++) {
Peter Zijlstrad039ac62012-05-31 21:20:16 +02006103 for (k = 0; k < nr_node_ids; k++) {
6104 int distance = node_distance(i, k);
6105
6106 if (distance > curr_distance &&
6107 (distance < next_distance ||
6108 next_distance == curr_distance))
6109 next_distance = distance;
6110
6111 /*
6112			 * While not a strong assumption, it would be nice to know
6113			 * about cases where node A is connected to B but B is not
6114			 * equally connected to A.
6115 */
6116 if (sched_debug() && node_distance(k, i) != distance)
6117 sched_numa_warn("Node-distance not symmetric");
6118
6119 if (sched_debug() && i && !find_numa_distance(distance))
6120 sched_numa_warn("Node-0 not representative");
6121 }
6122 if (next_distance != curr_distance) {
6123 sched_domains_numa_distance[level++] = next_distance;
6124 sched_domains_numa_levels = level;
6125 curr_distance = next_distance;
6126 } else break;
Peter Zijlstracb83b622012-04-17 15:49:36 +02006127 }
Peter Zijlstrad039ac62012-05-31 21:20:16 +02006128
6129 /*
6130 * In case of sched_debug() we verify the above assumption.
6131 */
6132 if (!sched_debug())
6133 break;
Peter Zijlstracb83b622012-04-17 15:49:36 +02006134 }
6135 /*
6136 * 'level' contains the number of unique distances, excluding the
6137 * identity distance node_distance(i,i).
6138 *
6139	 * The sched_domains_numa_distance[] array includes the actual distance
6140 * numbers.
6141 */
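/*
 * Worked example with a hypothetical 3-node distance table:
 *
 *   node_distance() =  10 20 30
 *                      20 10 20
 *                      30 20 10
 *
 * Starting from the identity distance 10, the scan above discovers 20
 * and then 30, yielding sched_domains_numa_distance[] = { 20, 30 }
 * and sched_domains_numa_levels == 2.
 */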
6142
6143 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6144 if (!sched_domains_numa_masks)
6145 return;
6146
6147 /*
6148 * Now for each level, construct a mask per node which contains all
6149 * cpus of nodes that are that many hops away from us.
6150 */
6151 for (i = 0; i < level; i++) {
6152 sched_domains_numa_masks[i] =
6153 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6154 if (!sched_domains_numa_masks[i])
6155 return;
6156
6157 for (j = 0; j < nr_node_ids; j++) {
Peter Zijlstra2ea45802012-05-25 09:26:43 +02006158 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
Peter Zijlstracb83b622012-04-17 15:49:36 +02006159 if (!mask)
6160 return;
6161
6162 sched_domains_numa_masks[i][j] = mask;
6163
6164 for (k = 0; k < nr_node_ids; k++) {
Peter Zijlstradd7d8632012-05-11 00:56:20 +02006165 if (node_distance(j, k) > sched_domains_numa_distance[i])
Peter Zijlstracb83b622012-04-17 15:49:36 +02006166 continue;
6167
6168 cpumask_or(mask, mask, cpumask_of_node(k));
6169 }
6170 }
6171 }
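/*
 * Continuing the hypothetical table above: for level 0 (distance 20),
 * sched_domains_numa_masks[0][1] covers the cpus of all nodes within
 * distance 20 of node 1, i.e. nodes 0, 1 and 2, while the same level
 * for node 0 only reaches nodes 0 and 1.  Each successive level widens
 * the span until the top level covers every node.
 */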
6172
6173 tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
6174 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6175 if (!tl)
6176 return;
6177
6178 /*
6179 * Copy the default topology bits..
6180 */
6181 for (i = 0; default_topology[i].init; i++)
6182 tl[i] = default_topology[i];
6183
6184 /*
6185 * .. and append 'j' levels of NUMA goodness.
6186 */
6187 for (j = 0; j < level; i++, j++) {
6188 tl[i] = (struct sched_domain_topology_level){
6189 .init = sd_numa_init,
6190 .mask = sd_numa_mask,
6191 .flags = SDTL_OVERLAP,
6192 .numa_level = j,
6193 };
6194 }
6195
6196 sched_domain_topology = tl;
6197}
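/*
 * Illustrative user-space sketch (not kernel code) of the deduplicating
 * selection sort above: repeatedly pick the smallest distance larger
 * than the current one until no larger distance exists.  The 4-node
 * distance table below is a made-up SLIT-style example.
 */
#if 0	/* example only */
#include <stdio.h>

#define NR_NODES 4

static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

int main(void)
{
	int curr = dist[0][0], next;
	int levels = 0;

	for (;;) {
		int i, j;

		next = curr;
		for (i = 0; i < NR_NODES; i++)
			for (j = 0; j < NR_NODES; j++) {
				int d = dist[i][j];

				/* smallest distance strictly above curr */
				if (d > curr && (d < next || next == curr))
					next = d;
			}
		if (next == curr)
			break;
		printf("level %d: distance %d\n", levels++, next);
		curr = next;
	}
	printf("%d unique non-identity distances\n", levels);	/* 2 */
	return 0;
}
#endif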
6198#else
6199static inline void sched_init_numa(void)
6200{
6201}
6202#endif /* CONFIG_NUMA */
6203
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006204static int __sdt_alloc(const struct cpumask *cpu_map)
6205{
6206 struct sched_domain_topology_level *tl;
6207 int j;
6208
6209 for (tl = sched_domain_topology; tl->init; tl++) {
6210 struct sd_data *sdd = &tl->data;
6211
6212 sdd->sd = alloc_percpu(struct sched_domain *);
6213 if (!sdd->sd)
6214 return -ENOMEM;
6215
6216 sdd->sg = alloc_percpu(struct sched_group *);
6217 if (!sdd->sg)
6218 return -ENOMEM;
6219
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02006220 sdd->sgp = alloc_percpu(struct sched_group_power *);
6221 if (!sdd->sgp)
6222 return -ENOMEM;
6223
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006224 for_each_cpu(j, cpu_map) {
6225 struct sched_domain *sd;
6226 struct sched_group *sg;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02006227 struct sched_group_power *sgp;
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006228
6229 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6230 GFP_KERNEL, cpu_to_node(j));
6231 if (!sd)
6232 return -ENOMEM;
6233
6234 *per_cpu_ptr(sdd->sd, j) = sd;
6235
6236 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6237 GFP_KERNEL, cpu_to_node(j));
6238 if (!sg)
6239 return -ENOMEM;
6240
Igor Mammedov30b4e9e2012-05-09 12:38:28 +02006241 sg->next = sg;
6242
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006243 *per_cpu_ptr(sdd->sg, j) = sg;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02006244
Peter Zijlstrac1174872012-05-31 14:47:33 +02006245 sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02006246 GFP_KERNEL, cpu_to_node(j));
6247 if (!sgp)
6248 return -ENOMEM;
6249
6250 *per_cpu_ptr(sdd->sgp, j) = sgp;
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006251 }
6252 }
6253
6254 return 0;
6255}
6256
6257static void __sdt_free(const struct cpumask *cpu_map)
6258{
6259 struct sched_domain_topology_level *tl;
6260 int j;
6261
6262 for (tl = sched_domain_topology; tl->init; tl++) {
6263 struct sd_data *sdd = &tl->data;
6264
6265 for_each_cpu(j, cpu_map) {
he, bofb2cf2c2012-04-25 19:59:21 +08006266 struct sched_domain *sd;
6267
6268 if (sdd->sd) {
6269 sd = *per_cpu_ptr(sdd->sd, j);
6270 if (sd && (sd->flags & SD_OVERLAP))
6271 free_sched_groups(sd->groups, 0);
6272 kfree(*per_cpu_ptr(sdd->sd, j));
6273 }
6274
6275 if (sdd->sg)
6276 kfree(*per_cpu_ptr(sdd->sg, j));
6277 if (sdd->sgp)
6278 kfree(*per_cpu_ptr(sdd->sgp, j));
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006279 }
6280 free_percpu(sdd->sd);
he, bofb2cf2c2012-04-25 19:59:21 +08006281 sdd->sd = NULL;
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006282 free_percpu(sdd->sg);
he, bofb2cf2c2012-04-25 19:59:21 +08006283 sdd->sg = NULL;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02006284 free_percpu(sdd->sgp);
he, bofb2cf2c2012-04-25 19:59:21 +08006285 sdd->sgp = NULL;
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006286 }
6287}
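/*
 * Illustrative user-space sketch (not kernel code) of the
 * sentinel-terminated table walk used by __sdt_alloc()/__sdt_free()
 * above: iteration stops at the first entry whose init hook is NULL,
 * which is also what lets sched_init_numa() append NUMA levels simply
 * by rebuilding the table.  Names here are invented for the demo.
 */
#if 0	/* example only */
#include <stdio.h>

struct level {
	const char *name;
	int (*init)(void);	/* NULL init terminates the table */
};

static int dummy_init(void) { return 0; }

static struct level topology[] = {
	{ "SMT", dummy_init },
	{ "MC",  dummy_init },
	{ "CPU", dummy_init },
	{ NULL,  NULL },	/* sentinel */
};

int main(void)
{
	struct level *tl;

	for (tl = topology; tl->init; tl++)
		printf("level: %s\n", tl->name);
	return 0;
}
#endif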
6288
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006289struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6290 struct s_data *d, const struct cpumask *cpu_map,
Peter Zijlstrad069b912011-04-07 14:10:02 +02006291 struct sched_domain_attr *attr, struct sched_domain *child,
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006292 int cpu)
6293{
Peter Zijlstra54ab4ff2011-04-07 14:10:03 +02006294 struct sched_domain *sd = tl->init(tl, cpu);
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006295 if (!sd)
Peter Zijlstrad069b912011-04-07 14:10:02 +02006296 return child;
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006297
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006298 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
Peter Zijlstra60495e72011-04-07 14:10:04 +02006299 if (child) {
6300 sd->level = child->level + 1;
6301 sched_domain_level_max = max(sched_domain_level_max, sd->level);
Peter Zijlstrad069b912011-04-07 14:10:02 +02006302 child->parent = sd;
Peter Zijlstra60495e72011-04-07 14:10:04 +02006303 }
Peter Zijlstrad069b912011-04-07 14:10:02 +02006304 sd->child = child;
Dimitri Sivanicha841f8c2012-06-05 13:44:36 -05006305 set_domain_attribute(sd, attr);
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006306
6307 return sd;
6308}
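/*
 * Illustrative user-space sketch (not kernel code) of what the repeated
 * build_sched_domain() calls above produce: one domain per topology
 * level, each wider level becoming the parent of the previous one, and
 * a failed level simply leaving the chain as it was.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdlib.h>

struct dom {
	int level;
	struct dom *parent, *child;
};

static struct dom *add_level(struct dom *child, int level)
{
	struct dom *d = calloc(1, sizeof(*d));

	if (!d)
		return child;	/* mirror the '!sd -> return child' case */
	d->level = level;
	d->child = child;
	if (child)
		child->parent = d;
	return d;
}

int main(void)
{
	struct dom *top = NULL, *d;
	int tl;

	for (tl = 0; tl < 3; tl++)	/* e.g. SMT, MC, NUMA */
		top = add_level(top, tl);

	for (d = top; d && d->child; d = d->child)
		;			/* walk down to the base domain */
	for (; d; d = d->parent)
		printf("level %d\n", d->level);

	while (top) {			/* tear the chain down again */
		d = top->child;
		free(top);
		top = d;
	}
	return 0;
}
#endif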
6309
Mike Travis7c16ec52008-04-04 18:11:11 -07006310/*
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006311 * Build sched domains for a given set of cpus and attach the sched domains
6312 * to the individual cpus
Linus Torvalds1da177e2005-04-16 15:20:36 -07006313 */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006314static int build_sched_domains(const struct cpumask *cpu_map,
6315 struct sched_domain_attr *attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006316{
Andreas Herrmann2109b992009-08-18 12:53:00 +02006317 enum s_alloc alloc_state = sa_none;
Peter Zijlstradce840a2011-04-07 14:09:50 +02006318 struct sched_domain *sd;
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006319 struct s_data d;
Peter Zijlstra822ff792011-04-07 14:09:51 +02006320 int i, ret = -ENOMEM;
Rusty Russell3404c8d2008-11-25 02:35:03 +10306321
Andreas Herrmann2109b992009-08-18 12:53:00 +02006322 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6323 if (alloc_state != sa_rootdomain)
6324 goto error;
Mike Travis7c16ec52008-04-04 18:11:11 -07006325
Peter Zijlstradce840a2011-04-07 14:09:50 +02006326 /* Set up domains for cpus specified by the cpu_map. */
Rusty Russellabcd0832008-11-25 02:35:02 +10306327 for_each_cpu(i, cpu_map) {
Peter Zijlstraeb7a74e62011-04-07 14:10:00 +02006328 struct sched_domain_topology_level *tl;
6329
Peter Zijlstra3bd65a82011-04-07 14:09:54 +02006330 sd = NULL;
Peter Zijlstrae3589f62011-07-15 10:35:52 +02006331 for (tl = sched_domain_topology; tl->init; tl++) {
Peter Zijlstra2c402dc2011-04-07 14:10:01 +02006332 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
Peter Zijlstrae3589f62011-07-15 10:35:52 +02006333 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6334 sd->flags |= SD_OVERLAP;
Peter Zijlstrad1102352011-07-20 18:42:57 +02006335 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6336 break;
Peter Zijlstrae3589f62011-07-15 10:35:52 +02006337 }
Peter Zijlstrad274cb32011-04-07 14:09:43 +02006338
Peter Zijlstrad069b912011-04-07 14:10:02 +02006339 while (sd->child)
6340 sd = sd->child;
6341
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02006342 *per_cpu_ptr(d.sd, i) = sd;
Peter Zijlstradce840a2011-04-07 14:09:50 +02006343 }
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02006344
Peter Zijlstradce840a2011-04-07 14:09:50 +02006345 /* Build the groups for the domains */
6346 for_each_cpu(i, cpu_map) {
6347 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6348 sd->span_weight = cpumask_weight(sched_domain_span(sd));
Peter Zijlstrae3589f62011-07-15 10:35:52 +02006349 if (sd->flags & SD_OVERLAP) {
6350 if (build_overlap_sched_groups(sd, i))
6351 goto error;
6352 } else {
6353 if (build_sched_groups(sd, i))
6354 goto error;
6355 }
Peter Zijlstra1cf519022011-04-07 14:09:47 +02006356 }
Peter Zijlstraa06dadb2011-04-07 14:09:44 +02006357 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358
Linus Torvalds1da177e2005-04-16 15:20:36 -07006359 /* Calculate CPU power for physical packages and nodes */
Peter Zijlstraa9c9a9b2011-04-07 14:09:49 +02006360 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6361 if (!cpumask_test_cpu(i, cpu_map))
6362 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363
Peter Zijlstradce840a2011-04-07 14:09:50 +02006364 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6365 claim_allocations(i, sd);
Peter Zijlstracd4ea6a2011-04-07 14:09:45 +02006366 init_sched_groups_power(i, sd);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006367 }
Siddha, Suresh Bf712c0c2006-07-30 03:02:59 -07006368 }
John Hawkes9c1cfda2005-09-06 15:18:14 -07006369
Linus Torvalds1da177e2005-04-16 15:20:36 -07006370 /* Attach the domains */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006371 rcu_read_lock();
Rusty Russellabcd0832008-11-25 02:35:02 +10306372 for_each_cpu(i, cpu_map) {
Peter Zijlstra21d42cc2011-04-07 14:09:48 +02006373 sd = *per_cpu_ptr(d.sd, i);
Andreas Herrmann49a02c52009-08-18 12:51:52 +02006374 cpu_attach_domain(sd, d.rd, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006375 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006376 rcu_read_unlock();
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006377
Peter Zijlstra822ff792011-04-07 14:09:51 +02006378 ret = 0;
Srivatsa Vaddagiri51888ca2006-06-27 02:54:38 -07006379error:
Andreas Herrmann2109b992009-08-18 12:53:00 +02006380 __free_domain_allocs(&d, alloc_state, cpu_map);
Peter Zijlstra822ff792011-04-07 14:09:51 +02006381 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382}
Paul Jackson029190c2007-10-18 23:40:20 -07006383
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306384static cpumask_var_t *doms_cur; /* current sched domains */
Paul Jackson029190c2007-10-18 23:40:20 -07006385static int ndoms_cur; /* number of sched domains in 'doms_cur' */
Ingo Molnar4285f5942008-05-16 17:47:14 +02006386static struct sched_domain_attr *dattr_cur;
6387				/* attributes of custom domains in 'doms_cur' */
Paul Jackson029190c2007-10-18 23:40:20 -07006388
6389/*
6390 * Special case: If a kmalloc of a doms_cur partition (array of
Rusty Russell42128232008-11-25 02:35:12 +10306391 * cpumask) fails, then fall back to a single sched domain,
6392 * as determined by the single cpumask fallback_doms.
Paul Jackson029190c2007-10-18 23:40:20 -07006393 */
Rusty Russell42128232008-11-25 02:35:12 +10306394static cpumask_var_t fallback_doms;
Paul Jackson029190c2007-10-18 23:40:20 -07006395
Heiko Carstensee79d1b2008-12-09 18:49:50 +01006396/*
6397 * arch_update_cpu_topology lets virtualized architectures update the
6398 * cpu core maps. It is supposed to return 1 if the topology changed
6399 * or 0 if it stayed the same.
6400 */
6401int __attribute__((weak)) arch_update_cpu_topology(void)
Heiko Carstens22e52b02008-03-12 18:31:59 +01006402{
Heiko Carstensee79d1b2008-12-09 18:49:50 +01006403 return 0;
Heiko Carstens22e52b02008-03-12 18:31:59 +01006404}
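/*
 * Illustrative sketch (not kernel code) of the weak-symbol pattern used
 * by arch_update_cpu_topology() above.  The two definitions would live
 * in separate objects; when both are linked, the strong one wins.
 */
#if 0	/* example only -- the two functions belong in separate files */
/* default.c -- generic weak fallback */
int __attribute__((weak)) update_topology(void)
{
	return 0;	/* topology unchanged */
}

/* arch.c -- an architecture's strong override */
int update_topology(void)
{
	return 1;	/* topology changed: callers rebuild domains */
}
#endif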
6405
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306406cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6407{
6408 int i;
6409 cpumask_var_t *doms;
6410
6411 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6412 if (!doms)
6413 return NULL;
6414 for (i = 0; i < ndoms; i++) {
6415 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6416 free_sched_domains(doms, i);
6417 return NULL;
6418 }
6419 }
6420 return doms;
6421}
6422
6423void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6424{
6425 unsigned int i;
6426 for (i = 0; i < ndoms; i++)
6427 free_cpumask_var(doms[i]);
6428 kfree(doms);
6429}
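/*
 * Illustrative user-space sketch (not kernel code) of the
 * alloc_sched_domains()/free_sched_domains() pairing above: on a
 * partial allocation failure the entries obtained so far are released,
 * so callers never see a half-built array.
 */
#if 0	/* example only */
#include <stdlib.h>

typedef unsigned long mask_t;	/* stand-in for cpumask_var_t */

static void free_masks(mask_t **doms, unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free(doms[i]);
	free(doms);
}

static mask_t **alloc_masks(unsigned int ndoms)
{
	mask_t **doms = malloc(sizeof(*doms) * ndoms);
	unsigned int i;

	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		doms[i] = calloc(1, sizeof(mask_t));
		if (!doms[i]) {
			free_masks(doms, i);	/* only what we got */
			return NULL;
		}
	}
	return doms;
}

int main(void)
{
	mask_t **doms = alloc_masks(2);

	if (doms)
		free_masks(doms, 2);
	return 0;
}
#endif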
6430
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006431/*
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006432 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
Paul Jackson029190c2007-10-18 23:40:20 -07006433 * For now this just excludes isolated cpus, but could be used to
6434 * exclude other special cases in the future.
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006435 */
Peter Zijlstrac4a88492011-04-07 14:09:42 +02006436static int init_sched_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006437{
Milton Miller73785472007-10-24 18:23:48 +02006438 int err;
6439
Heiko Carstens22e52b02008-03-12 18:31:59 +01006440 arch_update_cpu_topology();
Paul Jackson029190c2007-10-18 23:40:20 -07006441 ndoms_cur = 1;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306442 doms_cur = alloc_sched_domains(ndoms_cur);
Paul Jackson029190c2007-10-18 23:40:20 -07006443 if (!doms_cur)
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306444 doms_cur = &fallback_doms;
6445 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006446 err = build_sched_domains(doms_cur[0], NULL);
Milton Miller6382bc92007-10-15 17:00:19 +02006447 register_sched_domain_sysctl();
Milton Miller73785472007-10-24 18:23:48 +02006448
6449 return err;
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006450}
6451
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006452/*
6453 * Detach sched domains from a group of cpus specified in cpu_map.
6454 * These cpus will now be attached to the NULL domain.
6455 */
Rusty Russell96f874e2008-11-25 02:35:14 +10306456static void detach_destroy_domains(const struct cpumask *cpu_map)
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006457{
6458 int i;
6459
Peter Zijlstradce840a2011-04-07 14:09:50 +02006460 rcu_read_lock();
Rusty Russellabcd0832008-11-25 02:35:02 +10306461 for_each_cpu(i, cpu_map)
Gregory Haskins57d885f2008-01-25 21:08:18 +01006462 cpu_attach_domain(NULL, &def_root_domain, i);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006463 rcu_read_unlock();
Dinakar Guniguntala1a20ff22005-06-25 14:57:33 -07006464}
6465
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006466/* handle null as "default" */
6467static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6468 struct sched_domain_attr *new, int idx_new)
6469{
6470 struct sched_domain_attr tmp;
6471
6472 /* fast path */
6473 if (!new && !cur)
6474 return 1;
6475
6476 tmp = SD_ATTR_INIT;
6477 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6478 new ? (new + idx_new) : &tmp,
6479 sizeof(struct sched_domain_attr));
6480}
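/*
 * Illustrative user-space sketch (not kernel code) of the
 * NULL-as-default comparison in dattrs_equal() above: a missing
 * attribute array compares equal to an entry that still holds the
 * default value.  The single field here is an assumption for the demo.
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>

struct attr { int relax_domain_level; };

#define ATTR_INIT ((struct attr){ .relax_domain_level = -1 })

static int attrs_equal(struct attr *cur, int i, struct attr *new, int j)
{
	struct attr tmp = ATTR_INIT;

	if (!cur && !new)
		return 1;
	return !memcmp(cur ? cur + i : &tmp,
		       new ? new + j : &tmp, sizeof(struct attr));
}

int main(void)
{
	struct attr one = ATTR_INIT;

	printf("%d\n", attrs_equal(NULL, 0, &one, 0));	/* 1: default */
	one.relax_domain_level = 2;
	printf("%d\n", attrs_equal(NULL, 0, &one, 0));	/* 0: custom  */
	return 0;
}
#endif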
6481
Paul Jackson029190c2007-10-18 23:40:20 -07006482/*
6483 * Partition sched domains as specified by the 'ndoms_new'
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006484 * cpumasks in the array doms_new[] of cpumasks. This compares
Paul Jackson029190c2007-10-18 23:40:20 -07006485 * doms_new[] to the current sched domain partitioning, doms_cur[].
6486 * It destroys each deleted domain and builds each new domain.
6487 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306488 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01006489 * The masks must not intersect (overlap); we set up one
6490 * sched domain for each mask. CPUs not in any of the cpumasks will
6491 * not be load balanced. If the same cpumask appears both in the
Paul Jackson029190c2007-10-18 23:40:20 -07006492 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6493 * it as it is.
6494 *
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306495 * The passed-in 'doms_new' should be allocated using
6496 * alloc_sched_domains. This routine takes ownership of it and will
6497 * free_sched_domains it when done with it. If the caller failed the
6498 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6499 * and partition_sched_domains() will fall back to the single partition
6500 * 'fallback_doms'; this also forces the domains to be rebuilt.
Paul Jackson029190c2007-10-18 23:40:20 -07006501 *
Rusty Russell96f874e2008-11-25 02:35:14 +10306502 * If doms_new == NULL it will be replaced with cpu_online_mask.
Li Zefan700018e2008-11-18 14:02:03 +08006503 * ndoms_new == 0 is a special case for destroying existing domains,
6504 * and it will not create the default domain.
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07006505 *
Paul Jackson029190c2007-10-18 23:40:20 -07006506 * Call with hotplug lock held
6507 */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306508void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006509 struct sched_domain_attr *dattr_new)
Paul Jackson029190c2007-10-18 23:40:20 -07006510{
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07006511 int i, j, n;
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01006512 int new_topology;
Paul Jackson029190c2007-10-18 23:40:20 -07006513
Heiko Carstens712555e2008-04-28 11:33:07 +02006514 mutex_lock(&sched_domains_mutex);
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01006515
Milton Miller73785472007-10-24 18:23:48 +02006516 /* always unregister in case we don't destroy any domains */
6517 unregister_sched_domain_sysctl();
6518
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01006519 /* Let architecture update cpu core mappings. */
6520 new_topology = arch_update_cpu_topology();
6521
Max Krasnyanskydfb512e2008-08-29 13:11:41 -07006522 n = doms_new ? ndoms_new : 0;
Paul Jackson029190c2007-10-18 23:40:20 -07006523
6524 /* Destroy deleted domains */
6525 for (i = 0; i < ndoms_cur; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01006526 for (j = 0; j < n && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306527 if (cpumask_equal(doms_cur[i], doms_new[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006528 && dattrs_equal(dattr_cur, i, dattr_new, j))
Paul Jackson029190c2007-10-18 23:40:20 -07006529 goto match1;
6530 }
6531 /* no match - a current sched domain not in new doms_new[] */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306532 detach_destroy_domains(doms_cur[i]);
Paul Jackson029190c2007-10-18 23:40:20 -07006533match1:
6534 ;
6535 }
6536
Max Krasnyanskye761b772008-07-15 04:43:49 -07006537 if (doms_new == NULL) {
6538 ndoms_cur = 0;
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306539 doms_new = &fallback_doms;
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006540 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
Li Zefanfaa2f982008-11-04 16:20:23 +08006541 WARN_ON_ONCE(dattr_new);
Max Krasnyanskye761b772008-07-15 04:43:49 -07006542 }
6543
Paul Jackson029190c2007-10-18 23:40:20 -07006544 /* Build new domains */
6545 for (i = 0; i < ndoms_new; i++) {
Heiko Carstensd65bd5e2008-12-09 18:49:51 +01006546 for (j = 0; j < ndoms_cur && !new_topology; j++) {
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306547 if (cpumask_equal(doms_new[i], doms_cur[j])
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006548 && dattrs_equal(dattr_new, i, dattr_cur, j))
Paul Jackson029190c2007-10-18 23:40:20 -07006549 goto match2;
6550 }
6551 /* no match - add a new doms_new */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006552 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
Paul Jackson029190c2007-10-18 23:40:20 -07006553match2:
6554 ;
6555 }
6556
6557 /* Remember the new sched domains */
Rusty Russellacc3f5d2009-11-03 14:53:40 +10306558 if (doms_cur != &fallback_doms)
6559 free_sched_domains(doms_cur, ndoms_cur);
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006560 kfree(dattr_cur); /* kfree(NULL) is safe */
Paul Jackson029190c2007-10-18 23:40:20 -07006561 doms_cur = doms_new;
Hidetoshi Seto1d3504f2008-04-15 14:04:23 +09006562 dattr_cur = dattr_new;
Paul Jackson029190c2007-10-18 23:40:20 -07006563 ndoms_cur = ndoms_new;
Milton Miller73785472007-10-24 18:23:48 +02006564
6565 register_sched_domain_sysctl();
Srivatsa Vaddagiria1835612008-01-25 21:08:00 +01006566
Heiko Carstens712555e2008-04-28 11:33:07 +02006567 mutex_unlock(&sched_domains_mutex);
Paul Jackson029190c2007-10-18 23:40:20 -07006568}
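/*
 * Illustrative user-space sketch (not kernel code) of the two-pass diff
 * in partition_sched_domains() above, with ints standing in for
 * cpumasks: destroy current entries with no match in the new set, build
 * new entries with no match in the current set, leave the rest alone.
 */
#if 0	/* example only */
#include <stdio.h>

static void destroy(int d) { printf("destroy %d\n", d); }
static void build(int d)   { printf("build %d\n", d); }

static void repartition(int *cur, int ncur, int *new, int nnew)
{
	int i, j;

	for (i = 0; i < ncur; i++) {
		for (j = 0; j < nnew; j++)
			if (cur[i] == new[j])
				goto match1;
		destroy(cur[i]);
match1:
		;
	}
	for (i = 0; i < nnew; i++) {
		for (j = 0; j < ncur; j++)
			if (new[i] == cur[j])
				goto match2;
		build(new[i]);
match2:
		;
	}
}

int main(void)
{
	int cur[] = { 1, 2, 3 }, new[] = { 2, 4 };

	repartition(cur, 3, new, 2);	/* destroys 1 and 3, builds 4 */
	return 0;
}
#endif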
6569
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306570static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
6571
Linus Torvalds1da177e2005-04-16 15:20:36 -07006572/*
Tejun Heo3a101d02010-06-08 21:40:36 +02006573 * Update cpusets according to cpu_active mask. If cpusets are
6574 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6575 * around partition_sched_domains().
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306576 *
6577 * If we come here as part of a suspend/resume, don't touch cpusets because we
6578 * want them restored to their original state upon resume anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006579 */
Tejun Heo0b2e9182010-06-21 23:53:31 +02006580static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6581 void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006582{
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306583 switch (action) {
6584 case CPU_ONLINE_FROZEN:
6585 case CPU_DOWN_FAILED_FROZEN:
6586
6587 /*
6588 * num_cpus_frozen tracks how many CPUs are involved in suspend
6589		 * num_cpus_frozen tracks how many CPUs are involved in the suspend/
6590 * operation in the resume sequence, just build a single sched
6591 * domain, ignoring cpusets.
6592 */
6593 num_cpus_frozen--;
6594 if (likely(num_cpus_frozen)) {
6595 partition_sched_domains(1, NULL, NULL);
6596 break;
6597 }
6598
6599 /*
6600 * This is the last CPU online operation. So fall through and
6601 * restore the original sched domains by considering the
6602 * cpuset configurations.
6603 */
6604
Max Krasnyanskye761b772008-07-15 04:43:49 -07006605 case CPU_ONLINE:
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01006606 case CPU_DOWN_FAILED:
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05306607 cpuset_update_active_cpus(true);
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306608 break;
Max Krasnyanskye761b772008-07-15 04:43:49 -07006609 default:
6610 return NOTIFY_DONE;
6611 }
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306612 return NOTIFY_OK;
Max Krasnyanskye761b772008-07-15 04:43:49 -07006613}
Tejun Heo3a101d02010-06-08 21:40:36 +02006614
Tejun Heo0b2e9182010-06-21 23:53:31 +02006615static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6616 void *hcpu)
Tejun Heo3a101d02010-06-08 21:40:36 +02006617{
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306618 switch (action) {
Tejun Heo3a101d02010-06-08 21:40:36 +02006619 case CPU_DOWN_PREPARE:
Srivatsa S. Bhat7ddf96b2012-05-24 19:46:55 +05306620 cpuset_update_active_cpus(false);
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306621 break;
6622 case CPU_DOWN_PREPARE_FROZEN:
6623 num_cpus_frozen++;
6624 partition_sched_domains(1, NULL, NULL);
6625 break;
Tejun Heo3a101d02010-06-08 21:40:36 +02006626 default:
6627 return NOTIFY_DONE;
6628 }
Srivatsa S. Bhatd35be8b2012-05-24 19:46:26 +05306629 return NOTIFY_OK;
Tejun Heo3a101d02010-06-08 21:40:36 +02006630}
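/*
 * Illustrative user-space sketch (not kernel code) of the
 * num_cpus_frozen protocol above: every _FROZEN down-prepare increments
 * the counter, every _FROZEN online decrements it, and only the final
 * decrement of the resume sequence rebuilds the cpuset-defined domains.
 */
#if 0	/* example only */
#include <stdio.h>

static int num_cpus_frozen;

static void cpu_down_frozen(void)
{
	num_cpus_frozen++;
	printf("suspend: single sched domain\n");
}

static void cpu_up_frozen(void)
{
	if (--num_cpus_frozen)
		printf("resume: %d CPUs to go, single domain\n",
		       num_cpus_frozen);
	else
		printf("resume complete: rebuild from cpusets\n");
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)		/* three non-boot CPUs go down */
		cpu_down_frozen();
	for (i = 0; i < 3; i++)		/* ...and come back up */
		cpu_up_frozen();
	return 0;
}
#endif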
Max Krasnyanskye761b772008-07-15 04:43:49 -07006631
Linus Torvalds1da177e2005-04-16 15:20:36 -07006632void __init sched_init_smp(void)
6633{
Rusty Russelldcc30a32008-11-25 02:35:12 +10306634 cpumask_var_t non_isolated_cpus;
6635
6636 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
Yong Zhangcb5fd132009-09-14 20:20:16 +08006637 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
Nick Piggin5c1e1762006-10-03 01:14:04 -07006638
Peter Zijlstracb83b622012-04-17 15:49:36 +02006639 sched_init_numa();
6640
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006641 get_online_cpus();
Heiko Carstens712555e2008-04-28 11:33:07 +02006642 mutex_lock(&sched_domains_mutex);
Peter Zijlstrac4a88492011-04-07 14:09:42 +02006643 init_sched_domains(cpu_active_mask);
Rusty Russelldcc30a32008-11-25 02:35:12 +10306644 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6645 if (cpumask_empty(non_isolated_cpus))
6646 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
Heiko Carstens712555e2008-04-28 11:33:07 +02006647 mutex_unlock(&sched_domains_mutex);
Gautham R Shenoy95402b32008-01-25 21:08:02 +01006648 put_online_cpus();
Max Krasnyanskye761b772008-07-15 04:43:49 -07006649
Tejun Heo3a101d02010-06-08 21:40:36 +02006650 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6651 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
Max Krasnyanskye761b772008-07-15 04:43:49 -07006652
6653 /* RT runtime code needs to handle some hotplug events */
6654 hotcpu_notifier(update_runtime, 0);
6655
Peter Zijlstrab328ca12008-04-29 10:02:46 +02006656 init_hrtick();
Nick Piggin5c1e1762006-10-03 01:14:04 -07006657
6658 /* Move init over to a non-isolated CPU */
Rusty Russelldcc30a32008-11-25 02:35:12 +10306659 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
Nick Piggin5c1e1762006-10-03 01:14:04 -07006660 BUG();
Ingo Molnar19978ca2007-11-09 22:39:38 +01006661 sched_init_granularity();
Rusty Russelldcc30a32008-11-25 02:35:12 +10306662 free_cpumask_var(non_isolated_cpus);
Rusty Russell42128232008-11-25 02:35:12 +10306663
Rusty Russell0e3900e2008-11-25 02:35:13 +10306664 init_sched_rt_class();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006665}
6666#else
6667void __init sched_init_smp(void)
6668{
Ingo Molnar19978ca2007-11-09 22:39:38 +01006669 sched_init_granularity();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006670}
6671#endif /* CONFIG_SMP */
6672
Arun R Bharadwajcd1bb942009-04-16 12:15:34 +05306673const_debug unsigned int sysctl_timer_migration = 1;
6674
Linus Torvalds1da177e2005-04-16 15:20:36 -07006675int in_sched_functions(unsigned long addr)
6676{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677 return in_lock_functions(addr) ||
6678 (addr >= (unsigned long)__sched_text_start
6679 && addr < (unsigned long)__sched_text_end);
6680}
6681
Peter Zijlstra029632f2011-10-25 10:00:11 +02006682#ifdef CONFIG_CGROUP_SCHED
6683struct task_group root_task_group;
Mike Galbraith35cf4e52012-08-07 05:00:13 +02006684LIST_HEAD(task_groups);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01006685#endif
6686
Peter Zijlstra029632f2011-10-25 10:00:11 +02006687DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01006688
Linus Torvalds1da177e2005-04-16 15:20:36 -07006689void __init sched_init(void)
6690{
Ingo Molnardd41f592007-07-09 18:51:59 +02006691 int i, j;
Mike Travis434d53b2008-04-04 18:11:04 -07006692 unsigned long alloc_size = 0, ptr;
6693
6694#ifdef CONFIG_FAIR_GROUP_SCHED
6695 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6696#endif
6697#ifdef CONFIG_RT_GROUP_SCHED
6698 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6699#endif
Rusty Russelldf7c8e82009-03-19 15:22:20 +10306700#ifdef CONFIG_CPUMASK_OFFSTACK
Rusty Russell8c083f02009-03-19 15:22:20 +10306701 alloc_size += num_possible_cpus() * cpumask_size();
Rusty Russelldf7c8e82009-03-19 15:22:20 +10306702#endif
Mike Travis434d53b2008-04-04 18:11:04 -07006703 if (alloc_size) {
Pekka Enberg36b7b6d2009-06-10 23:42:36 +03006704 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
Mike Travis434d53b2008-04-04 18:11:04 -07006705
6706#ifdef CONFIG_FAIR_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08006707 root_task_group.se = (struct sched_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07006708 ptr += nr_cpu_ids * sizeof(void **);
6709
Yong Zhang07e06b02011-01-07 15:17:36 +08006710 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07006711 ptr += nr_cpu_ids * sizeof(void **);
Peter Zijlstraeff766a2008-04-19 19:45:00 +02006712
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006713#endif /* CONFIG_FAIR_GROUP_SCHED */
Mike Travis434d53b2008-04-04 18:11:04 -07006714#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08006715 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
Mike Travis434d53b2008-04-04 18:11:04 -07006716 ptr += nr_cpu_ids * sizeof(void **);
6717
Yong Zhang07e06b02011-01-07 15:17:36 +08006718 root_task_group.rt_rq = (struct rt_rq **)ptr;
Peter Zijlstraeff766a2008-04-19 19:45:00 +02006719 ptr += nr_cpu_ids * sizeof(void **);
6720
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006721#endif /* CONFIG_RT_GROUP_SCHED */
Rusty Russelldf7c8e82009-03-19 15:22:20 +10306722#ifdef CONFIG_CPUMASK_OFFSTACK
6723 for_each_possible_cpu(i) {
6724 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
6725 ptr += cpumask_size();
6726 }
6727#endif /* CONFIG_CPUMASK_OFFSTACK */
Mike Travis434d53b2008-04-04 18:11:04 -07006728 }
Ingo Molnardd41f592007-07-09 18:51:59 +02006729
Gregory Haskins57d885f2008-01-25 21:08:18 +01006730#ifdef CONFIG_SMP
6731 init_defrootdomain();
6732#endif
6733
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02006734 init_rt_bandwidth(&def_rt_bandwidth,
6735 global_rt_period(), global_rt_runtime());
6736
6737#ifdef CONFIG_RT_GROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08006738 init_rt_bandwidth(&root_task_group.rt_bandwidth,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02006739 global_rt_period(), global_rt_runtime());
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02006740#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02006741
Dhaval Giani7c941432010-01-20 13:26:18 +01006742#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08006743 list_add(&root_task_group.list, &task_groups);
6744 INIT_LIST_HEAD(&root_task_group.children);
Glauber Costaf4d6f6c2011-11-01 19:19:07 -02006745 INIT_LIST_HEAD(&root_task_group.siblings);
Mike Galbraith5091faa2010-11-30 14:18:03 +01006746 autogroup_init(&init_task);
Glauber Costa54c707e2011-11-28 14:45:19 -02006747
Dhaval Giani7c941432010-01-20 13:26:18 +01006748#endif /* CONFIG_CGROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01006749
Glauber Costa54c707e2011-11-28 14:45:19 -02006750#ifdef CONFIG_CGROUP_CPUACCT
6751 root_cpuacct.cpustat = &kernel_cpustat;
6752 root_cpuacct.cpuusage = alloc_percpu(u64);
6753 /* Too early, not expected to fail */
6754 BUG_ON(!root_cpuacct.cpuusage);
6755#endif
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08006756 for_each_possible_cpu(i) {
Ingo Molnar70b97a72006-07-03 00:25:42 -07006757 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758
6759 rq = cpu_rq(i);
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006760 raw_spin_lock_init(&rq->lock);
Nick Piggin78979862005-06-25 14:57:13 -07006761 rq->nr_running = 0;
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006762 rq->calc_load_active = 0;
6763 rq->calc_load_update = jiffies + LOAD_FREQ;
Jan H. Schönherracb5a9b2011-07-14 18:32:43 +02006764 init_cfs_rq(&rq->cfs);
Peter Zijlstrafa85ae22008-01-25 21:08:29 +01006765 init_rt_rq(&rq->rt, rq);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01006766#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra029632f2011-10-25 10:00:11 +02006767 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01006768 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
Dhaval Giani354d60c2008-04-19 19:44:59 +02006769 /*
Yong Zhang07e06b02011-01-07 15:17:36 +08006770 * How much cpu bandwidth does root_task_group get?
Dhaval Giani354d60c2008-04-19 19:44:59 +02006771 *
6772		 * In case of task-groups formed through the cgroup filesystem, it
6773 * gets 100% of the cpu resources in the system. This overall
6774 * system cpu resource is divided among the tasks of
Yong Zhang07e06b02011-01-07 15:17:36 +08006775 * root_task_group and its child task-groups in a fair manner,
Dhaval Giani354d60c2008-04-19 19:44:59 +02006776 * based on each entity's (task or task-group's) weight
6777 * (se->load.weight).
6778 *
Yong Zhang07e06b02011-01-07 15:17:36 +08006779 * In other words, if root_task_group has 10 tasks (each of weight
Dhaval Giani354d60c2008-04-19 19:44:59 +02006780 * 1024) and two child groups A0 and A1 (of weight 1024 each),
6781 * then A0's share of the cpu resource is:
6782 *
Ingo Molnar0d905bc2009-05-04 19:13:30 +02006783 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
Dhaval Giani354d60c2008-04-19 19:44:59 +02006784 *
Yong Zhang07e06b02011-01-07 15:17:36 +08006785 * We achieve this by letting root_task_group's tasks sit
6786 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
Dhaval Giani354d60c2008-04-19 19:44:59 +02006787 */
Paul Turnerab84d312011-07-21 09:43:28 -07006788 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
Yong Zhang07e06b02011-01-07 15:17:36 +08006789 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
Dhaval Giani354d60c2008-04-19 19:44:59 +02006790#endif /* CONFIG_FAIR_GROUP_SCHED */
6791
6792 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01006793#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01006794 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
Yong Zhang07e06b02011-01-07 15:17:36 +08006795 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01006796#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006797
Ingo Molnardd41f592007-07-09 18:51:59 +02006798 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6799 rq->cpu_load[j] = 0;
Venkatesh Pallipadifdf3e952010-05-17 18:14:43 -07006800
6801 rq->last_load_update_tick = jiffies;
6802
Linus Torvalds1da177e2005-04-16 15:20:36 -07006803#ifdef CONFIG_SMP
Nick Piggin41c7ce92005-06-25 14:57:24 -07006804 rq->sd = NULL;
Gregory Haskins57d885f2008-01-25 21:08:18 +01006805 rq->rd = NULL;
Nikhil Rao1399fa72011-05-18 10:09:39 -07006806 rq->cpu_power = SCHED_POWER_SCALE;
Gregory Haskins3f029d32009-07-29 11:08:47 -04006807 rq->post_schedule = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808 rq->active_balance = 0;
Ingo Molnardd41f592007-07-09 18:51:59 +02006809 rq->next_balance = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006810 rq->push_cpu = 0;
Christoph Lameter0a2966b2006-09-25 23:30:51 -07006811 rq->cpu = i;
Gregory Haskins1f11eb62008-06-04 15:04:05 -04006812 rq->online = 0;
Mike Galbraitheae0c9d2009-11-10 03:50:02 +01006813 rq->idle_stamp = 0;
6814 rq->avg_idle = 2*sysctl_sched_migration_cost;
Peter Zijlstra367456c2012-02-20 21:49:09 +01006815
6816 INIT_LIST_HEAD(&rq->cfs_tasks);
6817
Gregory Haskinsdc938522008-01-25 21:08:26 +01006818 rq_attach_root(rq, &def_root_domain);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006819#ifdef CONFIG_NO_HZ
Suresh Siddha1c792db2011-12-01 17:07:32 -08006820 rq->nohz_flags = 0;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006821#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006822#endif
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006823 init_rq_hrtick(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006824 atomic_set(&rq->nr_iowait, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825 }
6826
Peter Williams2dd73a42006-06-27 02:54:34 -07006827 set_load_weight(&init_task);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07006828
Avi Kivitye107be32007-07-26 13:40:43 +02006829#ifdef CONFIG_PREEMPT_NOTIFIERS
6830 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6831#endif
6832
Heiko Carstensb50f60c2006-07-30 03:03:52 -07006833#ifdef CONFIG_RT_MUTEXES
Dima Zavin732375c2011-07-07 17:27:59 -07006834 plist_head_init(&init_task.pi_waiters);
Heiko Carstensb50f60c2006-07-30 03:03:52 -07006835#endif
6836
Linus Torvalds1da177e2005-04-16 15:20:36 -07006837 /*
6838 * The boot idle thread does lazy MMU switching as well:
6839 */
6840 atomic_inc(&init_mm.mm_count);
6841 enter_lazy_tlb(&init_mm, current);
6842
6843 /*
6844 * Make us the idle thread. Technically, schedule() should not be
6845 * called from this thread, however somewhere below it might be,
6846 * but because we are the idle thread, we just pick up running again
6847 * when this runqueue becomes "idle".
6848 */
6849 init_idle(current, smp_processor_id());
Thomas Gleixnerdce48a82009-04-11 10:43:41 +02006850
6851 calc_load_update = jiffies + LOAD_FREQ;
6852
Ingo Molnardd41f592007-07-09 18:51:59 +02006853 /*
6854 * During early bootup we pretend to be a normal task:
6855 */
6856 current->sched_class = &fair_sched_class;
Ingo Molnar6892b752008-02-13 14:02:36 +01006857
Rusty Russellbf4d83f2008-11-25 09:57:51 +10306858#ifdef CONFIG_SMP
Peter Zijlstra4cb98832011-04-07 14:09:58 +02006859 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
Rusty Russellbdddd292009-12-02 14:09:16 +10306860 /* May be allocated at isolcpus cmdline parse time */
6861 if (cpu_isolated_map == NULL)
6862 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
Thomas Gleixner29d5e042012-04-20 13:05:45 +00006863 idle_thread_set_boot_cpu();
Peter Zijlstra029632f2011-10-25 10:00:11 +02006864#endif
6865 init_sched_fair_class();
Rusty Russell6a7b3dc2008-11-25 02:35:04 +10306866
Ingo Molnar6892b752008-02-13 14:02:36 +01006867 scheduler_running = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006868}
6869
Frederic Weisbeckerd902db12011-06-08 19:31:56 +02006870#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02006871static inline int preempt_count_equals(int preempt_offset)
6872{
Frederic Weisbecker234da7b2009-12-16 20:21:05 +01006873 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02006874
Arnd Bergmann4ba82162011-01-25 22:52:22 +01006875 return (nested == preempt_offset);
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02006876}
6877
Simon Kagstromd8948372009-12-23 11:08:18 +01006878void __might_sleep(const char *file, int line, int preempt_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006879{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006880 static unsigned long prev_jiffy; /* ratelimiting */
6881
Paul E. McKenneyb3fbab02011-05-24 08:31:09 -07006882 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
Frederic Weisbeckere4aafea2009-07-16 15:44:29 +02006883 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6884 system_state != SYSTEM_RUNNING || oops_in_progress)
Ingo Molnaraef745f2008-08-28 11:34:43 +02006885 return;
6886 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6887 return;
6888 prev_jiffy = jiffies;
6889
Peter Zijlstra3df0fc52009-12-20 14:23:57 +01006890 printk(KERN_ERR
6891 "BUG: sleeping function called from invalid context at %s:%d\n",
6892 file, line);
6893 printk(KERN_ERR
6894 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6895 in_atomic(), irqs_disabled(),
6896 current->pid, current->comm);
Ingo Molnaraef745f2008-08-28 11:34:43 +02006897
6898 debug_show_held_locks(current);
6899 if (irqs_disabled())
6900 print_irqtrace_events(current);
6901 dump_stack();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006902}
6903EXPORT_SYMBOL(__might_sleep);
6904#endif
6905
6906#ifdef CONFIG_MAGIC_SYSRQ
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006907static void normalize_task(struct rq *rq, struct task_struct *p)
6908{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006909 const struct sched_class *prev_class = p->sched_class;
6910 int old_prio = p->prio;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006911 int on_rq;
Peter Zijlstra3e51f332008-05-03 18:29:28 +02006912
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02006913 on_rq = p->on_rq;
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006914 if (on_rq)
Peter Zijlstra4ca9b722012-01-25 11:50:51 +01006915 dequeue_task(rq, p, 0);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006916 __setscheduler(rq, p, SCHED_NORMAL, 0);
6917 if (on_rq) {
Peter Zijlstra4ca9b722012-01-25 11:50:51 +01006918 enqueue_task(rq, p, 0);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006919 resched_task(rq->curr);
6920 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006921
6922 check_class_changed(rq, p, prev_class, old_prio);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006923}
6924
Linus Torvalds1da177e2005-04-16 15:20:36 -07006925void normalize_rt_tasks(void)
6926{
Ingo Molnara0f98a12007-06-17 18:37:45 +02006927 struct task_struct *g, *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006928 unsigned long flags;
Ingo Molnar70b97a72006-07-03 00:25:42 -07006929 struct rq *rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01006931 read_lock_irqsave(&tasklist_lock, flags);
Ingo Molnara0f98a12007-06-17 18:37:45 +02006932 do_each_thread(g, p) {
Ingo Molnar178be792007-10-15 17:00:18 +02006933 /*
6934 * Only normalize user tasks:
6935 */
6936 if (!p->mm)
6937 continue;
6938
Ingo Molnardd41f592007-07-09 18:51:59 +02006939 p->se.exec_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02006940#ifdef CONFIG_SCHEDSTATS
Lucas De Marchi41acab82010-03-10 23:37:45 -03006941 p->se.statistics.wait_start = 0;
6942 p->se.statistics.sleep_start = 0;
6943 p->se.statistics.block_start = 0;
Ingo Molnar6cfb0d52007-08-02 17:41:40 +02006944#endif
Ingo Molnardd41f592007-07-09 18:51:59 +02006945
6946 if (!rt_task(p)) {
6947 /*
6948 * Renice negative nice level userspace
6949 * tasks back to 0:
6950 */
6951 if (TASK_NICE(p) < 0 && p->mm)
6952 set_user_nice(p, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006953 continue;
Ingo Molnardd41f592007-07-09 18:51:59 +02006954 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955
Thomas Gleixner1d615482009-11-17 14:54:03 +01006956 raw_spin_lock(&p->pi_lock);
Ingo Molnarb29739f2006-06-27 02:54:51 -07006957 rq = __task_rq_lock(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006958
Ingo Molnar178be792007-10-15 17:00:18 +02006959 normalize_task(rq, p);
Andi Kleen3a5e4dc2007-10-15 17:00:15 +02006960
Ingo Molnarb29739f2006-06-27 02:54:51 -07006961 __task_rq_unlock(rq);
Thomas Gleixner1d615482009-11-17 14:54:03 +01006962 raw_spin_unlock(&p->pi_lock);
Ingo Molnara0f98a12007-06-17 18:37:45 +02006963 } while_each_thread(g, p);
6964
Peter Zijlstra4cf5d772008-02-13 15:45:39 +01006965 read_unlock_irqrestore(&tasklist_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966}
6967
6968#endif /* CONFIG_MAGIC_SYSRQ */
Linus Torvalds1df5c102005-09-12 07:59:21 -07006969
Jason Wessel67fc4e02010-05-20 21:04:21 -05006970#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
Linus Torvalds1df5c102005-09-12 07:59:21 -07006971/*
Jason Wessel67fc4e02010-05-20 21:04:21 -05006972 * These functions are only useful for the IA64 MCA handling, or kdb.
Linus Torvalds1df5c102005-09-12 07:59:21 -07006973 *
6974 * They can only be called when the whole system has been
6975 * stopped - every CPU needs to be quiescent, and no scheduling
6976 * activity can take place. Using them for anything else would
6977 * be a serious bug, and as a result, they aren't even visible
6978 * under any other configuration.
6979 */
6980
6981/**
6982 * curr_task - return the current task for a given cpu.
6983 * @cpu: the processor in question.
6984 *
6985 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6986 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07006987struct task_struct *curr_task(int cpu)
Linus Torvalds1df5c102005-09-12 07:59:21 -07006988{
6989 return cpu_curr(cpu);
6990}
6991
Jason Wessel67fc4e02010-05-20 21:04:21 -05006992#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
6993
6994#ifdef CONFIG_IA64
Linus Torvalds1df5c102005-09-12 07:59:21 -07006995/**
6996 * set_curr_task - set the current task for a given cpu.
6997 * @cpu: the processor in question.
6998 * @p: the task pointer to set.
6999 *
7000 * Description: This function must only be used when non-maskable interrupts
Ingo Molnar41a2d6c2007-12-05 15:46:09 +01007001 * are serviced on a separate stack. It allows the architecture to switch the
7002 * notion of the current task on a cpu in a non-blocking manner. This function
Linus Torvalds1df5c102005-09-12 07:59:21 -07007003 * must be called with all CPUs synchronized and interrupts disabled; the
7004 * caller must save the original value of the current task (see
7005 * curr_task() above) and restore that value before reenabling interrupts and
7006 * re-starting the system.
7007 *
7008 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7009 */
Ingo Molnar36c8b582006-07-03 00:25:41 -07007010void set_curr_task(int cpu, struct task_struct *p)
Linus Torvalds1df5c102005-09-12 07:59:21 -07007011{
7012 cpu_curr(cpu) = p;
7013}
7014
7015#endif
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007016
Dhaval Giani7c941432010-01-20 13:26:18 +01007017#ifdef CONFIG_CGROUP_SCHED
Peter Zijlstra029632f2011-10-25 10:00:11 +02007018/* task_group_lock serializes the addition/removal of task groups */
7019static DEFINE_SPINLOCK(task_group_lock);
7020
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007021static void free_sched_group(struct task_group *tg)
7022{
7023 free_fair_sched_group(tg);
7024 free_rt_sched_group(tg);
Mike Galbraithe9aa1dd2011-01-05 11:11:25 +01007025 autogroup_free(tg);
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007026 kfree(tg);
7027}
7028
7029/* allocate runqueue etc for a new task group */
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007030struct task_group *sched_create_group(struct task_group *parent)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007031{
7032 struct task_group *tg;
7033 unsigned long flags;
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007034
7035 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7036 if (!tg)
7037 return ERR_PTR(-ENOMEM);
7038
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007039 if (!alloc_fair_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007040 goto err;
7041
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007042 if (!alloc_rt_sched_group(tg, parent))
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007043 goto err;
7044
Peter Zijlstra8ed36992008-02-13 15:45:39 +01007045 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007046 list_add_rcu(&tg->list, &task_groups);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02007047
7048 WARN_ON(!parent); /* root should already exist */
7049
7050 tg->parent = parent;
Peter Zijlstraf473aa52008-04-19 19:45:00 +02007051 INIT_LIST_HEAD(&tg->children);
Zhang, Yanmin09f27242030-08-14 15:56:40 +08007052 list_add_rcu(&tg->siblings, &parent->children);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01007053 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007054
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007055 return tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007056
7057err:
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007058 free_sched_group(tg);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007059 return ERR_PTR(-ENOMEM);
7060}
7061
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007062/* rcu callback to free various structures associated with a task group */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007063static void free_sched_group_rcu(struct rcu_head *rhp)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007064{
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007065 /* now it should be safe to free those cfs_rqs */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007066 free_sched_group(container_of(rhp, struct task_group, rcu));
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007067}
7068
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007069/* Destroy runqueue etc associated with a task group */
Ingo Molnar4cf86d72007-10-15 17:00:14 +02007070void sched_destroy_group(struct task_group *tg)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007071{
Peter Zijlstra8ed36992008-02-13 15:45:39 +01007072 unsigned long flags;
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007073 int i;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007074
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08007075 /* end participation in shares distribution */
7076 for_each_possible_cpu(i)
Peter Zijlstrabccbe082008-02-13 15:45:40 +01007077 unregister_fair_sched_group(tg, i);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08007078
7079 spin_lock_irqsave(&task_group_lock, flags);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007080 list_del_rcu(&tg->list);
Peter Zijlstraf473aa52008-04-19 19:45:00 +02007081 list_del_rcu(&tg->siblings);
Peter Zijlstra8ed36992008-02-13 15:45:39 +01007082 spin_unlock_irqrestore(&task_group_lock, flags);
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007083
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007084 /* wait for possible concurrent references to cfs_rqs complete */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007085 call_rcu(&tg->rcu, free_sched_group_rcu);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007086}
7087
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007088/* Change a task's runqueue when it moves between groups.
Ingo Molnar3a252012007-10-15 17:00:12 +02007089 * The caller of this function should have put the task in its new group
7090 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7091 * reflect its new group.
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02007092 */
7093void sched_move_task(struct task_struct *tsk)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007094{
Peter Zijlstra8323f262012-06-22 13:36:05 +02007095 struct task_group *tg;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007096 int on_rq, running;
7097 unsigned long flags;
7098 struct rq *rq;
7099
7100 rq = task_rq_lock(tsk, &flags);
7101
Dmitry Adamushko051a1d12007-12-18 15:21:13 +01007102 running = task_current(rq, tsk);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02007103 on_rq = tsk->on_rq;
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007104
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07007105 if (on_rq)
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007106 dequeue_task(rq, tsk, 0);
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07007107 if (unlikely(running))
7108 tsk->sched_class->put_prev_task(rq, tsk);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007109
Peter Zijlstra8323f262012-06-22 13:36:05 +02007110 tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
7111 lockdep_is_held(&tsk->sighand->siglock)),
7112 struct task_group, css);
7113 tg = autogroup_task_group(tsk, tg);
7114 tsk->sched_task_group = tg;
7115
Peter Zijlstra810b3812008-02-29 15:21:01 -05007116#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007117 if (tsk->sched_class->task_move_group)
7118 tsk->sched_class->task_move_group(tsk, on_rq);
7119 else
Peter Zijlstra810b3812008-02-29 15:21:01 -05007120#endif
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007121 set_task_rq(tsk, task_cpu(tsk));
Peter Zijlstra810b3812008-02-29 15:21:01 -05007122
Hiroshi Shimamoto0e1f3482008-03-10 11:01:20 -07007123 if (unlikely(running))
7124 tsk->sched_class->set_curr_task(rq);
7125 if (on_rq)
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01007126 enqueue_task(rq, tsk, 0);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007127
Peter Zijlstra0122ec52011-04-05 17:23:51 +02007128 task_rq_unlock(rq, tsk, &flags);
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007129}
Dhaval Giani7c941432010-01-20 13:26:18 +01007130#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri29f59db2007-10-15 17:00:07 +02007131
Paul Turnera790de92011-07-21 09:43:29 -07007132#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007133static unsigned long to_ratio(u64 period, u64 runtime)
7134{
7135 if (runtime == RUNTIME_INF)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007136 return 1ULL << 20;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007137
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007138 return div64_u64(runtime << 20, period);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007139}
Paul Turnera790de92011-07-21 09:43:29 -07007140#endif
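/*
 * Illustrative user-space sketch (not kernel code): the 20-bit
 * fixed-point ratio computed by to_ratio() above.  With the default
 * 950000us runtime in a 1000000us period, the utilization becomes
 * 950000 * 2^20 / 1000000 = 996147, i.e. roughly 0.95 * 1048576.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

static uint64_t ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;	/* RUNTIME_INF case omitted */
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)ratio(1000000, 950000));	/* 996147 */
	return 0;
}
#endif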
7141
7142#ifdef CONFIG_RT_GROUP_SCHED
7143/*
7144 * Ensure that the real time constraints are schedulable.
7145 */
7146static DEFINE_MUTEX(rt_constraints_mutex);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007147
Dhaval Giani521f1a242008-02-28 15:21:56 +05307148/* Must be called with tasklist_lock held */
7149static inline int tg_has_rt_tasks(struct task_group *tg)
7150{
7151 struct task_struct *g, *p;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007152
Dhaval Giani521f1a242008-02-28 15:21:56 +05307153 do_each_thread(g, p) {
Peter Zijlstra029632f2011-10-25 10:00:11 +02007154 if (rt_task(p) && task_rq(p)->rt.tg == tg)
Dhaval Giani521f1a242008-02-28 15:21:56 +05307155 return 1;
7156 } while_each_thread(g, p);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007157
Dhaval Giani521f1a242008-02-28 15:21:56 +05307158 return 0;
7159}
7160
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007161struct rt_schedulable_data {
7162 struct task_group *tg;
7163 u64 rt_period;
7164 u64 rt_runtime;
7165};
7166
Paul Turnera790de92011-07-21 09:43:29 -07007167static int tg_rt_schedulable(struct task_group *tg, void *data)
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007168{
7169 struct rt_schedulable_data *d = data;
7170 struct task_group *child;
7171 unsigned long total, sum = 0;
7172 u64 period, runtime;
7173
7174 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7175 runtime = tg->rt_bandwidth.rt_runtime;
7176
7177 if (tg == d->tg) {
7178 period = d->rt_period;
7179 runtime = d->rt_runtime;
7180 }
7181
Peter Zijlstra4653f802008-09-23 15:33:44 +02007182 /*
7183 * Cannot have more runtime than the period.
7184 */
7185 if (runtime > period && runtime != RUNTIME_INF)
7186 return -EINVAL;
7187
7188 /*
7189 * Ensure we don't starve existing RT tasks.
7190 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007191 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7192 return -EBUSY;
7193
7194 total = to_ratio(period, runtime);
7195
Peter Zijlstra4653f802008-09-23 15:33:44 +02007196 /*
7197 * Nobody can have more than the global setting allows.
7198 */
7199 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7200 return -EINVAL;
7201
7202 /*
7203 * The sum of our children's runtime should not exceed our own.
7204 */
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007205 list_for_each_entry_rcu(child, &tg->children, siblings) {
7206 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7207 runtime = child->rt_bandwidth.rt_runtime;
7208
7209 if (child == d->tg) {
7210 period = d->rt_period;
7211 runtime = d->rt_runtime;
7212 }
7213
7214 sum += to_ratio(period, runtime);
7215 }
7216
7217 if (sum > total)
7218 return -EINVAL;
7219
7220 return 0;
7221}
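/*
 * Illustrative user-space sketch (not kernel code) of the admission
 * rule enforced by tg_rt_schedulable() above: the children of a group
 * may not claim, in sum, a larger fixed-point bandwidth ratio than the
 * group itself.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

static uint64_t ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 20) / period;
}

int main(void)
{
	/* parent: 500000us runtime per 1000000us period */
	uint64_t total = ratio(1000000, 500000);
	/* two children requesting 300000us each in the same period */
	uint64_t sum = ratio(1000000, 300000) + ratio(1000000, 300000);

	printf(sum > total ? "-EINVAL: overcommitted\n" : "ok\n");
	return 0;
}
#endif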
7222
7223static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7224{
Paul Turner82774342011-07-21 09:43:35 -07007225 int ret;
7226
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007227 struct rt_schedulable_data data = {
7228 .tg = tg,
7229 .rt_period = period,
7230 .rt_runtime = runtime,
7231 };
7232
Paul Turner82774342011-07-21 09:43:35 -07007233 rcu_read_lock();
7234 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7235 rcu_read_unlock();
7236
7237 return ret;
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007238}
7239
Paul Turnerab84d312011-07-21 09:43:28 -07007240static int tg_set_rt_bandwidth(struct task_group *tg,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007241 u64 rt_period, u64 rt_runtime)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007242{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007243 int i, err = 0;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007244
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007245 mutex_lock(&rt_constraints_mutex);
Dhaval Giani521f1a242008-02-28 15:21:56 +05307246 read_lock(&tasklist_lock);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007247 err = __rt_schedulable(tg, rt_period, rt_runtime);
7248 if (err)
Dhaval Giani521f1a242008-02-28 15:21:56 +05307249 goto unlock;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007250
Thomas Gleixner0986b112009-11-17 15:32:06 +01007251 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007252 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7253 tg->rt_bandwidth.rt_runtime = rt_runtime;
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007254
7255 for_each_possible_cpu(i) {
7256 struct rt_rq *rt_rq = tg->rt_rq[i];
7257
Thomas Gleixner0986b112009-11-17 15:32:06 +01007258 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007259 rt_rq->rt_runtime = rt_runtime;
Thomas Gleixner0986b112009-11-17 15:32:06 +01007260 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007261 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01007262 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
Peter Zijlstra49246272010-10-17 21:46:10 +02007263unlock:
Dhaval Giani521f1a242008-02-28 15:21:56 +05307264 read_unlock(&tasklist_lock);
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007265 mutex_unlock(&rt_constraints_mutex);
7266
7267 return err;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007268}
7269
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007270int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7271{
7272 u64 rt_runtime, rt_period;
7273
7274 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7275 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7276 if (rt_runtime_us < 0)
7277 rt_runtime = RUNTIME_INF;
7278
Paul Turnerab84d312011-07-21 09:43:28 -07007279 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007280}
7281
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007282long sched_group_rt_runtime(struct task_group *tg)
7283{
7284 u64 rt_runtime_us;
7285
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007286 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007287 return -1;
7288
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007289 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007290 do_div(rt_runtime_us, NSEC_PER_USEC);
7291 return rt_runtime_us;
7292}
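/*
 * Illustrative user-space sketch (not kernel code) of the ns-to-us
 * conversion done by the getter above, with -1 reported for an
 * unlimited (RUNTIME_INF) budget.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000ULL
#define RUNTIME_INF	((uint64_t)~0ULL)

static long runtime_us(uint64_t rt_runtime_ns)
{
	if (rt_runtime_ns == RUNTIME_INF)
		return -1;	/* "no limit" sentinel */
	return (long)(rt_runtime_ns / NSEC_PER_USEC);
}

int main(void)
{
	printf("%ld\n", runtime_us(950000000ULL));	/* 950000 */
	printf("%ld\n", runtime_us(RUNTIME_INF));	/* -1     */
	return 0;
}
#endif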
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007293
7294int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7295{
7296 u64 rt_runtime, rt_period;
7297
7298 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7299 rt_runtime = tg->rt_bandwidth.rt_runtime;
7300
Raistlin619b0482008-06-26 18:54:09 +02007301 if (rt_period == 0)
7302 return -EINVAL;
7303
Paul Turnerab84d312011-07-21 09:43:28 -07007304 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007305}
7306
7307long sched_group_rt_period(struct task_group *tg)
7308{
7309 u64 rt_period_us;
7310
7311 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7312 do_div(rt_period_us, NSEC_PER_USEC);
7313 return rt_period_us;
7314}
7315
7316static int sched_rt_global_constraints(void)
7317{
Peter Zijlstra4653f802008-09-23 15:33:44 +02007318 u64 runtime, period;
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007319 int ret = 0;
7320
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07007321 if (sysctl_sched_rt_period <= 0)
7322 return -EINVAL;
7323
Peter Zijlstra4653f802008-09-23 15:33:44 +02007324 runtime = global_rt_runtime();
7325 period = global_rt_period();
7326
7327 /*
7328 * Sanity check on the sysctl variables.
7329 */
7330 if (runtime > period && runtime != RUNTIME_INF)
7331 return -EINVAL;
Peter Zijlstra10b612f2008-06-19 14:22:27 +02007332
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007333 mutex_lock(&rt_constraints_mutex);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007334 read_lock(&tasklist_lock);
Peter Zijlstra4653f802008-09-23 15:33:44 +02007335 ret = __rt_schedulable(NULL, 0, 0);
Peter Zijlstra9a7e0b12008-08-19 12:33:06 +02007336 read_unlock(&tasklist_lock);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007337 mutex_unlock(&rt_constraints_mutex);
7338
7339 return ret;
7340}
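
/*
 * Called when the sched_rt_period_us/sched_rt_runtime_us sysctls
 * change. The runtime > period check rejects, for example, a runtime
 * of 1100000us against a period of 1000000us, and
 * __rt_schedulable(NULL, 0, 0) then re-verifies every task group
 * against the new global budget.
 */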
Dhaval Giani54e99122009-02-27 15:13:54 +05307341
7342int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7343{
7344 /* Don't accept realtime tasks when there is no way for them to run */
7345 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7346 return 0;
7347
7348 return 1;
7349}
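
/*
 * Example: moving a SCHED_FIFO task into a group whose
 * cpu.rt_runtime_us is still 0 (the default for newly created
 * groups) is refused -- cpu_cgroup_can_attach() turns the 0 return
 * into -EINVAL -- because the task would never get to run there.
 */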
7350
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007351#else /* !CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007352static int sched_rt_global_constraints(void)
7353{
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007354 unsigned long flags;
7355 int i;
7356
Hiroshi Shimamotoec5d4982008-09-10 17:00:19 -07007357 if (sysctl_sched_rt_period <= 0)
7358 return -EINVAL;
7359
Peter Zijlstra60aa6052009-05-05 17:50:21 +02007360 /*
7361 * There's always some RT tasks in the root group
7362 * -- migration, kstopmachine etc..
7363 */
7364 if (sysctl_sched_rt_runtime == 0)
7365 return -EBUSY;
7366
Thomas Gleixner0986b112009-11-17 15:32:06 +01007367 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007368 for_each_possible_cpu(i) {
7369 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7370
Thomas Gleixner0986b112009-11-17 15:32:06 +01007371 raw_spin_lock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007372 rt_rq->rt_runtime = global_rt_runtime();
Thomas Gleixner0986b112009-11-17 15:32:06 +01007373 raw_spin_unlock(&rt_rq->rt_runtime_lock);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007374 }
Thomas Gleixner0986b112009-11-17 15:32:06 +01007375 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
Peter Zijlstraac086bc2008-04-19 19:44:58 +02007376
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007377 return 0;
7378}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007379#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007380
7381int sched_rt_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007382 void __user *buffer, size_t *lenp,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007383 loff_t *ppos)
7384{
7385 int ret;
7386 int old_period, old_runtime;
7387 static DEFINE_MUTEX(mutex);
7388
7389 mutex_lock(&mutex);
7390 old_period = sysctl_sched_rt_period;
7391 old_runtime = sysctl_sched_rt_runtime;
7392
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07007393 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007394
7395 if (!ret && write) {
7396 ret = sched_rt_global_constraints();
7397 if (ret) {
7398 sysctl_sched_rt_period = old_period;
7399 sysctl_sched_rt_runtime = old_runtime;
7400 } else {
7401 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7402 def_rt_bandwidth.rt_period =
7403 ns_to_ktime(global_rt_period());
7404 }
7405 }
7406 mutex_unlock(&mutex);
7407
7408 return ret;
7409}
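
/*
 * A plausible invocation path, via procfs:
 *
 *	echo 800000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * proc_dointvec() stores the new value first; if
 * sched_rt_global_constraints() then rejects it, the old
 * period/runtime pair is restored and the error is propagated back
 * to the writer.
 */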
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007410
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007411#ifdef CONFIG_CGROUP_SCHED
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007412
7413/* return corresponding task_group object of a cgroup */
Paul Menage2b01dfe2007-10-24 18:23:50 +02007414static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007415{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007416 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7417 struct task_group, css);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007418}
7419
Li Zefan761b3ef2012-01-31 13:47:36 +08007420static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007421{
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007422 struct task_group *tg, *parent;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007423
Paul Menage2b01dfe2007-10-24 18:23:50 +02007424 if (!cgrp->parent) {
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007425 /* This is early initialization for the top cgroup */
Yong Zhang07e06b02011-01-07 15:17:36 +08007426 return &root_task_group.css;
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007427 }
7428
Dhaval Gianiec7dc8a2008-04-19 19:44:59 +02007429 parent = cgroup_tg(cgrp->parent);
7430 tg = sched_create_group(parent);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007431 if (IS_ERR(tg))
7432 return ERR_PTR(-ENOMEM);
7433
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007434 return &tg->css;
7435}
7436
Li Zefan761b3ef2012-01-31 13:47:36 +08007437static void cpu_cgroup_destroy(struct cgroup *cgrp)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007438{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007439 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007440
7441 sched_destroy_group(tg);
7442}
7443
Li Zefan761b3ef2012-01-31 13:47:36 +08007444static int cpu_cgroup_can_attach(struct cgroup *cgrp,
Tejun Heobb9d97b2011-12-12 18:12:21 -08007445 struct cgroup_taskset *tset)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007446{
Tejun Heobb9d97b2011-12-12 18:12:21 -08007447 struct task_struct *task;
7448
7449 cgroup_taskset_for_each(task, cgrp, tset) {
Peter Zijlstrab68aa232008-02-13 15:45:40 +01007450#ifdef CONFIG_RT_GROUP_SCHED
Tejun Heobb9d97b2011-12-12 18:12:21 -08007451 if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
7452 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01007453#else
Tejun Heobb9d97b2011-12-12 18:12:21 -08007454 /* We don't support RT-tasks being in separate groups */
7455 if (task->sched_class != &fair_sched_class)
7456 return -EINVAL;
Peter Zijlstrab68aa232008-02-13 15:45:40 +01007457#endif
Tejun Heobb9d97b2011-12-12 18:12:21 -08007458 }
Ben Blumbe367d02009-09-23 15:56:31 -07007459 return 0;
7460}
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007461
Li Zefan761b3ef2012-01-31 13:47:36 +08007462static void cpu_cgroup_attach(struct cgroup *cgrp,
Tejun Heobb9d97b2011-12-12 18:12:21 -08007463 struct cgroup_taskset *tset)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007464{
Tejun Heobb9d97b2011-12-12 18:12:21 -08007465 struct task_struct *task;
7466
7467 cgroup_taskset_for_each(task, cgrp, tset)
7468 sched_move_task(task);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007469}
7470
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01007471static void
Li Zefan761b3ef2012-01-31 13:47:36 +08007472cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7473 struct task_struct *task)
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01007474{
7475 /*
7476 * cgroup_exit() is called in the copy_process() failure path.
7477	 * Ignore this case since the task hasn't run yet; this avoids
7478 * trying to poke a half freed task state from generic code.
7479 */
7480 if (!(task->flags & PF_EXITING))
7481 return;
7482
7483 sched_move_task(task);
7484}
7485
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007486#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagef4c753b2008-04-29 00:59:56 -07007487static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
Paul Menage2b01dfe2007-10-24 18:23:50 +02007488 u64 shareval)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007489{
Nikhil Raoc8b28112011-05-18 14:37:48 -07007490 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007491}
7492
Paul Menagef4c753b2008-04-29 00:59:56 -07007493static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007494{
Paul Menage2b01dfe2007-10-24 18:23:50 +02007495 struct task_group *tg = cgroup_tg(cgrp);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007496
Nikhil Raoc8b28112011-05-18 14:37:48 -07007497 return (u64) scale_load_down(tg->shares);
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007498}
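
/*
 * scale_load()/scale_load_down() convert between the user-visible
 * cpu.shares unit (1024 == the weight of one default nice-0 task)
 * and the higher-resolution fixed-point representation used
 * internally on configurations that define extra load resolution.
 * For instance, assuming the usual cgroup mount point:
 *
 *	echo 2048 > /sys/fs/cgroup/cpu/mygroup/cpu.shares
 *
 * gives the group twice the default weight.
 */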
Paul Turnerab84d312011-07-21 09:43:28 -07007499
7500#ifdef CONFIG_CFS_BANDWIDTH
Paul Turnera790de92011-07-21 09:43:29 -07007501static DEFINE_MUTEX(cfs_constraints_mutex);
7502
Paul Turnerab84d312011-07-21 09:43:28 -07007503const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7504const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7505
Paul Turnera790de92011-07-21 09:43:29 -07007506static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7507
Paul Turnerab84d312011-07-21 09:43:28 -07007508static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7509{
Paul Turner56f570e2011-11-07 20:26:33 -08007510 int i, ret = 0, runtime_enabled, runtime_was_enabled;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007511 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
Paul Turnerab84d312011-07-21 09:43:28 -07007512
7513 if (tg == &root_task_group)
7514 return -EINVAL;
7515
7516 /*
7517	 * Ensure we have at least some amount of bandwidth every period. This is
7518 * to prevent reaching a state of large arrears when throttled via
7519 * entity_tick() resulting in prolonged exit starvation.
7520 */
7521 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7522 return -EINVAL;
7523
7524 /*
7525	 * Likewise, bound things on the other side by preventing insane quota
7526 * periods. This also allows us to normalize in computing quota
7527 * feasibility.
7528 */
7529 if (period > max_cfs_quota_period)
7530 return -EINVAL;
7531
Paul Turnera790de92011-07-21 09:43:29 -07007532 mutex_lock(&cfs_constraints_mutex);
7533 ret = __cfs_schedulable(tg, period, quota);
7534 if (ret)
7535 goto out_unlock;
7536
Paul Turner58088ad2011-07-21 09:43:31 -07007537 runtime_enabled = quota != RUNTIME_INF;
Paul Turner56f570e2011-11-07 20:26:33 -08007538 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7539 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
Paul Turnerab84d312011-07-21 09:43:28 -07007540 raw_spin_lock_irq(&cfs_b->lock);
7541 cfs_b->period = ns_to_ktime(period);
7542 cfs_b->quota = quota;
Paul Turner58088ad2011-07-21 09:43:31 -07007543
Paul Turnera9cf55b2011-07-21 09:43:32 -07007544 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07007545 /* restart the period timer (if active) to handle new period expiry */
7546 if (runtime_enabled && cfs_b->timer_active) {
7547 /* force a reprogram */
7548 cfs_b->timer_active = 0;
7549 __start_cfs_bandwidth(cfs_b);
7550 }
Paul Turnerab84d312011-07-21 09:43:28 -07007551 raw_spin_unlock_irq(&cfs_b->lock);
7552
7553 for_each_possible_cpu(i) {
7554 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
Peter Zijlstra029632f2011-10-25 10:00:11 +02007555 struct rq *rq = cfs_rq->rq;
Paul Turnerab84d312011-07-21 09:43:28 -07007556
7557 raw_spin_lock_irq(&rq->lock);
Paul Turner58088ad2011-07-21 09:43:31 -07007558 cfs_rq->runtime_enabled = runtime_enabled;
Paul Turnerab84d312011-07-21 09:43:28 -07007559 cfs_rq->runtime_remaining = 0;
Paul Turner671fd9d2011-07-21 09:43:34 -07007560
Peter Zijlstra029632f2011-10-25 10:00:11 +02007561 if (cfs_rq->throttled)
Paul Turner671fd9d2011-07-21 09:43:34 -07007562 unthrottle_cfs_rq(cfs_rq);
Paul Turnerab84d312011-07-21 09:43:28 -07007563 raw_spin_unlock_irq(&rq->lock);
7564 }
Paul Turnera790de92011-07-21 09:43:29 -07007565out_unlock:
7566 mutex_unlock(&cfs_constraints_mutex);
Paul Turnerab84d312011-07-21 09:43:28 -07007567
Paul Turnera790de92011-07-21 09:43:29 -07007568 return ret;
Paul Turnerab84d312011-07-21 09:43:28 -07007569}
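
/*
 * Worked example: period = 100ms with quota = 50ms caps the group at
 * half a CPU; quota = 200ms with the same period allows up to two
 * CPUs' worth of runtime per period. Once the new values are
 * published under cfs_b->lock, every currently throttled cfs_rq is
 * unthrottled so it can re-evaluate against the fresh quota.
 */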
7570
7571int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7572{
7573 u64 quota, period;
7574
Peter Zijlstra029632f2011-10-25 10:00:11 +02007575 period = ktime_to_ns(tg->cfs_bandwidth.period);
Paul Turnerab84d312011-07-21 09:43:28 -07007576 if (cfs_quota_us < 0)
7577 quota = RUNTIME_INF;
7578 else
7579 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7580
7581 return tg_set_cfs_bandwidth(tg, period, quota);
7582}
7583
7584long tg_get_cfs_quota(struct task_group *tg)
7585{
7586 u64 quota_us;
7587
Peter Zijlstra029632f2011-10-25 10:00:11 +02007588 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
Paul Turnerab84d312011-07-21 09:43:28 -07007589 return -1;
7590
Peter Zijlstra029632f2011-10-25 10:00:11 +02007591 quota_us = tg->cfs_bandwidth.quota;
Paul Turnerab84d312011-07-21 09:43:28 -07007592 do_div(quota_us, NSEC_PER_USEC);
7593
7594 return quota_us;
7595}
7596
7597int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7598{
7599 u64 quota, period;
7600
7601 period = (u64)cfs_period_us * NSEC_PER_USEC;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007602 quota = tg->cfs_bandwidth.quota;
Paul Turnerab84d312011-07-21 09:43:28 -07007603
Paul Turnerab84d312011-07-21 09:43:28 -07007604 return tg_set_cfs_bandwidth(tg, period, quota);
7605}
7606
7607long tg_get_cfs_period(struct task_group *tg)
7608{
7609 u64 cfs_period_us;
7610
Peter Zijlstra029632f2011-10-25 10:00:11 +02007611 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
Paul Turnerab84d312011-07-21 09:43:28 -07007612 do_div(cfs_period_us, NSEC_PER_USEC);
7613
7614 return cfs_period_us;
7615}
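
/*
 * The four helpers above back the cpu.cfs_quota_us and
 * cpu.cfs_period_us cgroup files. A plausible shell usage, assuming
 * the controller is mounted at /sys/fs/cgroup/cpu:
 *
 *	echo 100000 > /sys/fs/cgroup/cpu/mygroup/cpu.cfs_period_us
 *	echo 50000 > /sys/fs/cgroup/cpu/mygroup/cpu.cfs_quota_us
 *
 * i.e. 50ms of runtime every 100ms -- half a CPU. Writing -1 to
 * cpu.cfs_quota_us removes the limit again.
 */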
7616
7617static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
7618{
7619 return tg_get_cfs_quota(cgroup_tg(cgrp));
7620}
7621
7622static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
7623 s64 cfs_quota_us)
7624{
7625 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
7626}
7627
7628static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
7629{
7630 return tg_get_cfs_period(cgroup_tg(cgrp));
7631}
7632
7633static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7634 u64 cfs_period_us)
7635{
7636 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
7637}
7638
Paul Turnera790de92011-07-21 09:43:29 -07007639struct cfs_schedulable_data {
7640 struct task_group *tg;
7641 u64 period, quota;
7642};
7643
7644/*
7645 * normalize group quota/period to be quota/max_period
7646 * note: units are usecs
7647 */
7648static u64 normalize_cfs_quota(struct task_group *tg,
7649 struct cfs_schedulable_data *d)
7650{
7651 u64 quota, period;
7652
7653 if (tg == d->tg) {
7654 period = d->period;
7655 quota = d->quota;
7656 } else {
7657 period = tg_get_cfs_period(tg);
7658 quota = tg_get_cfs_quota(tg);
7659 }
7660
7661 /* note: these should typically be equivalent */
7662 if (quota == RUNTIME_INF || quota == -1)
7663 return RUNTIME_INF;
7664
7665 return to_ratio(period, quota);
7666}
7667
7668static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7669{
7670 struct cfs_schedulable_data *d = data;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007671 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
Paul Turnera790de92011-07-21 09:43:29 -07007672 s64 quota = 0, parent_quota = -1;
7673
7674 if (!tg->parent) {
7675 quota = RUNTIME_INF;
7676 } else {
Peter Zijlstra029632f2011-10-25 10:00:11 +02007677 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
Paul Turnera790de92011-07-21 09:43:29 -07007678
7679 quota = normalize_cfs_quota(tg, d);
7680 parent_quota = parent_b->hierarchal_quota;
7681
7682 /*
7683 * ensure max(child_quota) <= parent_quota, inherit when no
7684 * limit is set
7685 */
7686 if (quota == RUNTIME_INF)
7687 quota = parent_quota;
7688 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7689 return -EINVAL;
7690 }
7691 cfs_b->hierarchal_quota = quota;
7692
7693 return 0;
7694}
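
/*
 * Example of the invariant enforced here: a parent group with
 * quota/period = 50ms/100ms normalizes to half a CPU, so a child
 * requesting 80ms/100ms is rejected with -EINVAL, while a child with
 * no limit of its own (RUNTIME_INF) simply inherits the parent's
 * ratio.
 */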
7695
7696static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7697{
Paul Turner82774342011-07-21 09:43:35 -07007698 int ret;
Paul Turnera790de92011-07-21 09:43:29 -07007699 struct cfs_schedulable_data data = {
7700 .tg = tg,
7701 .period = period,
7702 .quota = quota,
7703 };
7704
7705 if (quota != RUNTIME_INF) {
7706 do_div(data.period, NSEC_PER_USEC);
7707 do_div(data.quota, NSEC_PER_USEC);
7708 }
7709
Paul Turner82774342011-07-21 09:43:35 -07007710 rcu_read_lock();
7711 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7712 rcu_read_unlock();
7713
7714 return ret;
Paul Turnera790de92011-07-21 09:43:29 -07007715}
Nikhil Raoe8da1b12011-07-21 09:43:40 -07007716
7717static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
7718 struct cgroup_map_cb *cb)
7719{
7720 struct task_group *tg = cgroup_tg(cgrp);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007721 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07007722
7723 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7724 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7725 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7726
7727 return 0;
7728}
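
/*
 * This populates the read-only cpu.stat file; a sketch of the
 * output (values illustrative, throttled_time in nanoseconds):
 *
 *	nr_periods 1042
 *	nr_throttled 13
 *	throttled_time 84512349
 */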
Paul Turnerab84d312011-07-21 09:43:28 -07007729#endif /* CONFIG_CFS_BANDWIDTH */
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007730#endif /* CONFIG_FAIR_GROUP_SCHED */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007731
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007732#ifdef CONFIG_RT_GROUP_SCHED
Mirco Tischler0c708142008-05-14 16:05:46 -07007733static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
Paul Menage06ecb272008-04-29 01:00:06 -07007734 s64 val)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007735{
Paul Menage06ecb272008-04-29 01:00:06 -07007736 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007737}
7738
Paul Menage06ecb272008-04-29 01:00:06 -07007739static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007740{
Paul Menage06ecb272008-04-29 01:00:06 -07007741 return sched_group_rt_runtime(cgroup_tg(cgrp));
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007742}
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007743
7744static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7745 u64 rt_period_us)
7746{
7747 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
7748}
7749
7750static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
7751{
7752 return sched_group_rt_period(cgroup_tg(cgrp));
7753}
Dhaval Giani6d6bc0a2008-05-30 14:23:45 +02007754#endif /* CONFIG_RT_GROUP_SCHED */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007755
Paul Menagefe5c7cc2007-10-29 21:18:11 +01007756static struct cftype cpu_files[] = {
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007757#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Menagefe5c7cc2007-10-29 21:18:11 +01007758 {
7759 .name = "shares",
Paul Menagef4c753b2008-04-29 00:59:56 -07007760 .read_u64 = cpu_shares_read_u64,
7761 .write_u64 = cpu_shares_write_u64,
Paul Menagefe5c7cc2007-10-29 21:18:11 +01007762 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007763#endif
Paul Turnerab84d312011-07-21 09:43:28 -07007764#ifdef CONFIG_CFS_BANDWIDTH
7765 {
7766 .name = "cfs_quota_us",
7767 .read_s64 = cpu_cfs_quota_read_s64,
7768 .write_s64 = cpu_cfs_quota_write_s64,
7769 },
7770 {
7771 .name = "cfs_period_us",
7772 .read_u64 = cpu_cfs_period_read_u64,
7773 .write_u64 = cpu_cfs_period_write_u64,
7774 },
Nikhil Raoe8da1b12011-07-21 09:43:40 -07007775 {
7776 .name = "stat",
7777 .read_map = cpu_stats_show,
7778 },
Paul Turnerab84d312011-07-21 09:43:28 -07007779#endif
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007780#ifdef CONFIG_RT_GROUP_SCHED
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007781 {
Peter Zijlstra9f0c1e52008-02-13 15:45:39 +01007782 .name = "rt_runtime_us",
Paul Menage06ecb272008-04-29 01:00:06 -07007783 .read_s64 = cpu_rt_runtime_read,
7784 .write_s64 = cpu_rt_runtime_write,
Peter Zijlstra6f505b12008-01-25 21:08:30 +01007785 },
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007786 {
7787 .name = "rt_period_us",
Paul Menagef4c753b2008-04-29 00:59:56 -07007788 .read_u64 = cpu_rt_period_read_uint,
7789 .write_u64 = cpu_rt_period_write_uint,
Peter Zijlstrad0b27fa2008-04-19 19:44:57 +02007790 },
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007791#endif
Tejun Heo4baf6e32012-04-01 12:09:55 -07007792 { } /* terminate */
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007793};
7794
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007795struct cgroup_subsys cpu_cgroup_subsys = {
Ingo Molnar38605ca2007-10-29 21:18:11 +01007796 .name = "cpu",
7797 .create = cpu_cgroup_create,
7798 .destroy = cpu_cgroup_destroy,
Tejun Heobb9d97b2011-12-12 18:12:21 -08007799 .can_attach = cpu_cgroup_can_attach,
7800 .attach = cpu_cgroup_attach,
Peter Zijlstra068c5cc2011-01-19 12:26:11 +01007801 .exit = cpu_cgroup_exit,
Ingo Molnar38605ca2007-10-29 21:18:11 +01007802 .subsys_id = cpu_cgroup_subsys_id,
Tejun Heo4baf6e32012-04-01 12:09:55 -07007803 .base_cftypes = cpu_files,
Srivatsa Vaddagiri68318b82007-10-18 23:41:03 -07007804 .early_init = 1,
7805};
7806
Peter Zijlstra052f1dc2008-02-13 15:45:40 +01007807#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007808
7809#ifdef CONFIG_CGROUP_CPUACCT
7810
7811/*
7812 * CPU accounting code for task groups.
7813 *
7814 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
7815 * (balbir@in.ibm.com).
7816 */
7817
Frederic Weisbecker73fbec62012-06-16 15:57:37 +02007818struct cpuacct root_cpuacct;
7819
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007820/* create a new cpu accounting group */
Li Zefan761b3ef2012-01-31 13:47:36 +08007821static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007822{
Glauber Costa54c707e2011-11-28 14:45:19 -02007823 struct cpuacct *ca;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007824
Glauber Costa54c707e2011-11-28 14:45:19 -02007825 if (!cgrp->parent)
7826 return &root_cpuacct.css;
7827
7828 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007829 if (!ca)
Bharata B Raoef12fef2009-03-31 10:02:22 +05307830 goto out;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007831
7832 ca->cpuusage = alloc_percpu(u64);
Bharata B Raoef12fef2009-03-31 10:02:22 +05307833 if (!ca->cpuusage)
7834 goto out_free_ca;
7835
Glauber Costa54c707e2011-11-28 14:45:19 -02007836 ca->cpustat = alloc_percpu(struct kernel_cpustat);
7837 if (!ca->cpustat)
7838 goto out_free_cpuusage;
Bharata B Rao934352f2008-11-10 20:41:13 +05307839
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007840 return &ca->css;
Bharata B Raoef12fef2009-03-31 10:02:22 +05307841
Glauber Costa54c707e2011-11-28 14:45:19 -02007842out_free_cpuusage:
Bharata B Raoef12fef2009-03-31 10:02:22 +05307843 free_percpu(ca->cpuusage);
7844out_free_ca:
7845 kfree(ca);
7846out:
7847 return ERR_PTR(-ENOMEM);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007848}
7849
7850/* destroy an existing cpu accounting group */
Li Zefan761b3ef2012-01-31 13:47:36 +08007851static void cpuacct_destroy(struct cgroup *cgrp)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007852{
Dhaval Giani32cd7562008-02-29 10:02:43 +05307853 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007854
Glauber Costa54c707e2011-11-28 14:45:19 -02007855 free_percpu(ca->cpustat);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007856 free_percpu(ca->cpuusage);
7857 kfree(ca);
7858}
7859
Ken Chen720f5492008-12-15 22:02:01 -08007860static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
7861{
Rusty Russellb36128c2009-02-20 16:29:08 +09007862 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08007863 u64 data;
7864
7865#ifndef CONFIG_64BIT
7866 /*
7867 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
7868 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007869 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08007870 data = *cpuusage;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007871 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08007872#else
7873 data = *cpuusage;
7874#endif
7875
7876 return data;
7877}
7878
7879static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
7880{
Rusty Russellb36128c2009-02-20 16:29:08 +09007881 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Ken Chen720f5492008-12-15 22:02:01 -08007882
7883#ifndef CONFIG_64BIT
7884 /*
7885 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
7886 */
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007887 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08007888 *cpuusage = val;
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007889 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
Ken Chen720f5492008-12-15 22:02:01 -08007890#else
7891 *cpuusage = val;
7892#endif
7893}
7894
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007895/* return total cpu usage (in nanoseconds) of a group */
Dhaval Giani32cd7562008-02-29 10:02:43 +05307896static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007897{
Dhaval Giani32cd7562008-02-29 10:02:43 +05307898 struct cpuacct *ca = cgroup_ca(cgrp);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007899 u64 totalcpuusage = 0;
7900 int i;
7901
Ken Chen720f5492008-12-15 22:02:01 -08007902 for_each_present_cpu(i)
7903 totalcpuusage += cpuacct_cpuusage_read(ca, i);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007904
7905 return totalcpuusage;
7906}
7907
Dhaval Giani0297b802008-02-29 10:02:44 +05307908static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
7909 u64 reset)
7910{
7911 struct cpuacct *ca = cgroup_ca(cgrp);
7912 int err = 0;
7913 int i;
7914
7915 if (reset) {
7916 err = -EINVAL;
7917 goto out;
7918 }
7919
Ken Chen720f5492008-12-15 22:02:01 -08007920 for_each_present_cpu(i)
7921 cpuacct_cpuusage_write(ca, i, 0);
Dhaval Giani0297b802008-02-29 10:02:44 +05307922
Dhaval Giani0297b802008-02-29 10:02:44 +05307923out:
7924 return err;
7925}
7926
Ken Chene9515c32008-12-15 22:04:15 -08007927static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
7928 struct seq_file *m)
7929{
7930 struct cpuacct *ca = cgroup_ca(cgroup);
7931 u64 percpu;
7932 int i;
7933
7934 for_each_present_cpu(i) {
7935 percpu = cpuacct_cpuusage_read(ca, i);
7936 seq_printf(m, "%llu ", (unsigned long long) percpu);
7937 }
7938 seq_printf(m, "\n");
7939 return 0;
7940}
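
/*
 * Example cpuacct.usage_percpu output on a 4-CPU box (one value per
 * present CPU, in nanoseconds; numbers illustrative):
 *
 *	123456789 987654321 0 4242424242
 */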
7941
Bharata B Raoef12fef2009-03-31 10:02:22 +05307942static const char *cpuacct_stat_desc[] = {
7943 [CPUACCT_STAT_USER] = "user",
7944 [CPUACCT_STAT_SYSTEM] = "system",
7945};
7946
7947static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
Glauber Costa54c707e2011-11-28 14:45:19 -02007948 struct cgroup_map_cb *cb)
Bharata B Raoef12fef2009-03-31 10:02:22 +05307949{
7950 struct cpuacct *ca = cgroup_ca(cgrp);
Glauber Costa54c707e2011-11-28 14:45:19 -02007951 int cpu;
7952 s64 val = 0;
Bharata B Raoef12fef2009-03-31 10:02:22 +05307953
Glauber Costa54c707e2011-11-28 14:45:19 -02007954 for_each_online_cpu(cpu) {
7955 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
7956 val += kcpustat->cpustat[CPUTIME_USER];
7957 val += kcpustat->cpustat[CPUTIME_NICE];
Bharata B Raoef12fef2009-03-31 10:02:22 +05307958 }
Glauber Costa54c707e2011-11-28 14:45:19 -02007959 val = cputime64_to_clock_t(val);
7960 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
7961
7962 val = 0;
7963 for_each_online_cpu(cpu) {
7964 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
7965 val += kcpustat->cpustat[CPUTIME_SYSTEM];
7966 val += kcpustat->cpustat[CPUTIME_IRQ];
7967 val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
7968 }
7969
7970 val = cputime64_to_clock_t(val);
7971 cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
7972
Bharata B Raoef12fef2009-03-31 10:02:22 +05307973 return 0;
7974}
7975
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007976static struct cftype files[] = {
7977 {
7978 .name = "usage",
Paul Menagef4c753b2008-04-29 00:59:56 -07007979 .read_u64 = cpuusage_read,
7980 .write_u64 = cpuusage_write,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007981 },
Ken Chene9515c32008-12-15 22:04:15 -08007982 {
7983 .name = "usage_percpu",
7984 .read_seq_string = cpuacct_percpu_seq_read,
7985 },
Bharata B Raoef12fef2009-03-31 10:02:22 +05307986 {
7987 .name = "stat",
7988 .read_map = cpuacct_stats_show,
7989 },
Tejun Heo4baf6e32012-04-01 12:09:55 -07007990 { } /* terminate */
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007991};
7992
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007993/*
7994 * charge this task's execution time to its accounting group.
7995 *
7996 * called with rq->lock held.
7997 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007998void cpuacct_charge(struct task_struct *tsk, u64 cputime)
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01007999{
8000 struct cpuacct *ca;
Bharata B Rao934352f2008-11-10 20:41:13 +05308001 int cpu;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008002
Li Zefanc40c6f82009-02-26 15:40:15 +08008003 if (unlikely(!cpuacct_subsys.active))
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008004 return;
8005
Bharata B Rao934352f2008-11-10 20:41:13 +05308006 cpu = task_cpu(tsk);
Bharata B Raoa18b83b2009-03-23 10:02:53 +05308007
8008 rcu_read_lock();
8009
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008010 ca = task_ca(tsk);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008011
Glauber Costa44252e42011-11-28 14:45:18 -02008012 for (; ca; ca = parent_ca(ca)) {
Rusty Russellb36128c2009-02-20 16:29:08 +09008013 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008014 *cpuusage += cputime;
8015 }
Bharata B Raoa18b83b2009-03-23 10:02:53 +05308016
8017 rcu_read_unlock();
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008018}
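
/*
 * The accounting is hierarchical: if tsk lives in group /A/B, the
 * delta is added to B's per-CPU counter, then to A's, then to the
 * root group's, by walking parent_ca() up the tree.
 * rcu_read_lock() keeps the cgroup hierarchy alive across the walk
 * without taking any cgroup locks.
 */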
8019
8020struct cgroup_subsys cpuacct_subsys = {
8021 .name = "cpuacct",
8022 .create = cpuacct_create,
8023 .destroy = cpuacct_destroy,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008024 .subsys_id = cpuacct_subsys_id,
Tejun Heo4baf6e32012-04-01 12:09:55 -07008025 .base_cftypes = files,
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +01008026};
8027#endif /* CONFIG_CGROUP_CPUACCT */