#include "sched.h"

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/security.h>
#include <linux/export.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

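/*
 * Boot-time setup: point the default autogroup at the root task group
 * and attach the init task to it; descendants pick up their autogroup
 * via sched_autogroup_fork().
 */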
void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}

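/* Free the autogroup bookkeeping once its task_group is being destroyed. */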
void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

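/* kref release callback: undo autogroup_create() and tear down the group. */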
static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_offline_group(ag->tg);
	sched_destroy_group(ag->tg);
}

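/*
 * Reference counting for struct autogroup: the final
 * autogroup_kref_put() tears the group down via autogroup_destroy().
 */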
static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}

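/*
 * Take a reference on @p's autogroup under ->siglock; fall back to the
 * default autogroup if the task is exiting and its sighand is gone.
 */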
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}

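/*
 * Create a new autogroup backed by its own task_group under the root
 * task group; on any failure, fall back to a reference on the default
 * autogroup so callers never see an error.
 */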
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;

	tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;

	sched_online_group(tg, &root_task_group);
	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kzalloc()");
	}

	return autogroup_kref_get(&autogroup_default);
}

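/*
 * Decide whether @p should run in its autogroup's task_group instead
 * of @tg; only tasks headed for the root task group are redirected.
 */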
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (tg != &root_task_group)
		return false;
	/*
	 * If we race with autogroup_move_group() the caller can use the old
	 * value of signal->autogroup, but in this case sched_move_task() will
	 * be called again before autogroup_kref_put().
	 *
	 * However, there is no way sched_autogroup_exit_task() could tell us
	 * to avoid autogroup->tg, so we abuse the PF_EXITING flag for this case.
	 */
	if (p->flags & PF_EXITING)
		return false;

	return true;
}

void sched_autogroup_exit_task(struct task_struct *p)
{
	/*
	 * We are going to call exit_notify() and autogroup_move_group() can't
	 * see this thread after that: we can no longer use signal->autogroup.
	 * See the PF_EXITING check in task_wants_autogroup().
	 */
	sched_move_task(p);
}

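/*
 * Switch @p's signal struct to @ag and migrate every thread in the
 * group, dropping the reference on the previous autogroup.
 */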
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);
	/*
	 * We can't avoid sched_move_task() after we changed signal->autogroup:
	 * this process may already be running with task_group() == prev->tg,
	 * or we can race with cgroup code which can read autogroup == prev
	 * under rq->lock. In the latter case for_each_thread() can not miss
	 * a migrating thread: cpu_cgroup_attach() must not be possible after
	 * cgroup_exit(), and the thread can't be removed from the thread list
	 * while we hold ->siglock.
	 *
	 * If an exiting thread was already removed from the thread list we
	 * rely on sched_autogroup_exit_task().
	 */
	for_each_thread(p, t)
		sched_move_task(t);

	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag = autogroup_create();

	autogroup_move_group(p, ag);
	/* drop extra reference added by autogroup_create() */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock. Currently has no users. */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

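/* A new signal struct inherits a reference to the parent's autogroup. */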
190void sched_autogroup_fork(struct signal_struct *sig)
191{
Mike Galbraith4f821982010-12-16 15:09:52 +0100192 sig->autogroup = autogroup_task_get(current);
Mike Galbraith5091faa2010-11-30 14:18:03 +0100193}
194
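/* Drop the signal struct's autogroup reference on teardown. */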
void sched_autogroup_exit(struct signal_struct *sig)
{
	autogroup_kref_put(sig->autogroup);
}

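/* "noautogroup" on the kernel command line disables the feature. */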
static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;

	return 1;
}

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

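/*
 * Write side of /proc/<pid>/autogroup: translate the requested nice
 * level into a shares value for the autogroup's task_group.
 */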
int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
	static unsigned long next = INITIAL_JIFFIES;
	struct autogroup *ag;
	unsigned long shares;
	int err;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -EINVAL;

	err = security_task_setnice(current, nice);
	if (err)
		return err;

	if (nice < 0 && !can_nice(current, nice))
		return -EPERM;

	/* This is a heavy operation taking global locks; rate-limit unprivileged callers. */
	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
		return -EAGAIN;

	next = HZ / 10 + jiffies;
	ag = autogroup_task_get(p);
	shares = scale_load(sched_prio_to_weight[nice + 20]);

	down_write(&ag->lock);
	err = sched_group_set_shares(ag->tg, shares);
	if (!err)
		ag->nice = nice;
	up_write(&ag->lock);

	autogroup_kref_put(ag);

	return err;
}

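/* Read side of /proc/<pid>/autogroup: report group id and nice level. */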
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
	struct autogroup *ag = autogroup_task_get(p);

	if (!task_group_is_autogroup(ag->tg))
		goto out;

	down_read(&ag->lock);
	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
	up_read(&ag->lock);

out:
	autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
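/* Emit "/autogroup-<id>" into @buf for scheduler debug output. */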
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	if (!task_group_is_autogroup(tg))
		return 0;

	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */