/*
 * cgroup_freezer.c - control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
16
17#include <linux/module.h>
18#include <linux/cgroup.h>
19#include <linux/fs.h>
20#include <linux/uaccess.h>
21#include <linux/freezer.h>
22#include <linux/seq_file.h>
23
/* Per-cgroup freezer states; userspace sees the matching freezer_state_strs[]. */
enum freezer_state {
	CGROUP_THAWED = 0,	/* all tasks runnable */
	CGROUP_FREEZING,	/* freeze requested; some tasks not yet frozen */
	CGROUP_FROZEN,		/* every task in the cgroup is frozen */
};
29
/* Freezer subsystem state attached to each cgroup via css. */
struct freezer {
	struct cgroup_subsys_state css;	/* must embed css for container_of() */
	enum freezer_state state;	/* current THAWED/FREEZING/FROZEN state */
	spinlock_t lock; /* protects _writes_ to state */
};
35
36static inline struct freezer *cgroup_freezer(
37 struct cgroup *cgroup)
38{
39 return container_of(
40 cgroup_subsys_state(cgroup, freezer_subsys_id),
41 struct freezer, css);
42}
43
44static inline struct freezer *task_freezer(struct task_struct *task)
45{
46 return container_of(task_subsys_state(task, freezer_subsys_id),
47 struct freezer, css);
48}
49
/*
 * Report whether @task's freezer cgroup is FREEZING or FROZEN.
 *
 * task_lock() pins task->cgroups so task_freezer() stays valid while we
 * read the state.  The root cgroup is treated as always THAWED since it
 * cannot be frozen.  Returns nonzero if the task's cgroup is freezing or
 * frozen.
 */
int cgroup_freezing_or_frozen(struct task_struct *task)
{
	struct freezer *freezer;
	enum freezer_state state;

	task_lock(task);
	freezer = task_freezer(task);
	if (!freezer->css.cgroup->parent)
		state = CGROUP_THAWED; /* root cgroup can't be frozen */
	else
		state = freezer->state;
	task_unlock(task);

	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
}
65
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
/* Indexed by enum freezer_state; shown in and parsed from freezer.state. */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
75
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED______/                      |
 *    \__________________________THAWED___________/
 */
86
87struct cgroup_subsys freezer_subsys;
88
89/* Locks taken and their ordering
90 * ------------------------------
91 * css_set_lock
92 * cgroup_mutex (AKA cgroup_lock)
93 * task->alloc_lock (AKA task_lock)
94 * freezer->lock
95 * task->sighand->siglock
96 *
97 * cgroup code forces css_set_lock to be taken before task->alloc_lock
98 *
99 * freezer_create(), freezer_destroy():
100 * cgroup_mutex [ by cgroup core ]
101 *
102 * can_attach():
103 * cgroup_mutex
104 *
105 * cgroup_frozen():
106 * task->alloc_lock (to get task's cgroup)
107 *
108 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
109 * task->alloc_lock (to get task's cgroup)
110 * freezer->lock
111 * sighand->siglock (if the cgroup is freezing)
112 *
113 * freezer_read():
114 * cgroup_mutex
115 * freezer->lock
116 * read_lock css_set_lock (cgroup iterator start)
117 *
118 * freezer_write() (freeze):
119 * cgroup_mutex
120 * freezer->lock
121 * read_lock css_set_lock (cgroup iterator start)
122 * sighand->siglock
123 *
124 * freezer_write() (unfreeze):
125 * cgroup_mutex
126 * freezer->lock
127 * read_lock css_set_lock (cgroup iterator start)
128 * task->alloc_lock (to prevent races with freeze_task())
129 * sighand->siglock
130 */
131static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
132 struct cgroup *cgroup)
133{
134 struct freezer *freezer;
135
136 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
137 if (!freezer)
138 return ERR_PTR(-ENOMEM);
139
140 spin_lock_init(&freezer->lock);
Matt Helsley81dcf332008-10-18 20:27:23 -0700141 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700142 return &freezer->css;
143}
144
/* Release the freezer state allocated in freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
150
Matt Helsley957a4ee2008-10-18 20:27:22 -0700151/* Task is frozen or will freeze immediately when next it gets woken */
152static bool is_task_frozen_enough(struct task_struct *task)
153{
154 return frozen(task) ||
155 (task_is_stopped_or_traced(task) && freezing(task));
156}
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700157
/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 */
/*
 * Veto migration of @task into @new_cgroup: refuse (-EBUSY) when the task
 * is effectively frozen, when the destination cgroup is FROZEN, or — for a
 * whole-threadgroup move — when any thread in the group is effectively
 * frozen.  Returns 0 to allow the attach.
 */
static int freezer_can_attach(struct cgroup_subsys *ss,
			      struct cgroup *new_cgroup,
			      struct task_struct *task, bool threadgroup)
{
	struct freezer *freezer;

	/*
	 * Anything frozen can't move or be moved to/from.
	 *
	 * Since orig_freezer->state == FROZEN means that @task has been
	 * frozen, so it's sufficient to check the latter condition.
	 */

	if (is_task_frozen_enough(task))
		return -EBUSY;

	freezer = cgroup_freezer(new_cgroup);
	if (freezer->state == CGROUP_FROZEN)
		return -EBUSY;

	if (threadgroup) {
		struct task_struct *c;

		/* RCU guards the thread_group list walk; bail on first
		 * frozen thread found. */
		rcu_read_lock();
		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
			if (is_task_frozen_enough(c)) {
				rcu_read_unlock();
				return -EBUSY;
			}
		}
		rcu_read_unlock();
	}

	return 0;
}
198
/*
 * fork() callback: if the child is born into a FREEZING cgroup, freeze it
 * immediately so it cannot escape an in-progress freeze.  Must stay cheap —
 * runs on every fork and deliberately avoids cgroup_mutex.
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call.
	 */
	freezer = task_freezer(task);

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/* A FROZEN cgroup rejects attaches, so a fork inside one is a bug. */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
226
/*
 * caller must hold freezer->lock
 */
/*
 * Recompute freezer->state from a census of the cgroup's tasks:
 * all frozen -> FROZEN, some frozen -> FREEZING, none frozen -> THAWED.
 * NOTE(review): an empty cgroup (ntotal == 0) satisfies nfrozen == ntotal
 * and is reported FROZEN — presumably intentional ("nothing left to
 * freeze"), but worth confirming against callers.
 */
static void update_freezer_state(struct cgroup *cgroup,
				 struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int nfrozen = 0, ntotal = 0;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
		if (is_task_frozen_enough(task))
			nfrozen++;
	}

	/*
	 * Transition to FROZEN when no new tasks can be added ensures
	 * that we never exist in the FROZEN state while there are unfrozen
	 * tasks.
	 */
	if (nfrozen == ntotal)
		freezer->state = CGROUP_FROZEN;
	else if (nfrozen > 0)
		freezer->state = CGROUP_FREEZING;
	else
		freezer->state = CGROUP_THAWED;
	cgroup_iter_end(cgroup, &it);
}
257
/*
 * Show the cgroup's freezer state in freezer.state.  Returns 0 on success,
 * -ENODEV if the cgroup is being removed.  Takes cgroup_mutex then
 * freezer->lock, matching the ordering documented above.
 */
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
			struct seq_file *m)
{
	struct freezer *freezer;
	enum freezer_state state;

	if (!cgroup_lock_live_group(cgroup))
		return -ENODEV;

	freezer = cgroup_freezer(cgroup);
	spin_lock_irq(&freezer->lock);
	state = freezer->state;
	if (state == CGROUP_FREEZING) {
		/* We change from FREEZING to FROZEN lazily if the cgroup was
		 * only partially frozen when we exited write. */
		update_freezer_state(cgroup, freezer);
		state = freezer->state;
	}
	spin_unlock_irq(&freezer->lock);
	cgroup_unlock();

	seq_puts(m, freezer_state_strs[state]);
	seq_putc(m, '\n');
	return 0;
}
283
/*
 * Attempt to freeze every task in the cgroup.  Sets the state to FREEZING
 * first so freezer_fork() catches children born mid-freeze.  Returns 0 if
 * every task was frozen or is on its way, -EBUSY if some task could not be
 * frozen now (caller leaves the cgroup in FREEZING; freezer_read() promotes
 * it lazily).  Caller must hold freezer->lock.
 */
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		/* freeze_task() returning 0 means nothing to do for @task */
		if (!freeze_task(task, true))
			continue;
		if (is_task_frozen_enough(task))
			continue;
		/* not freezing and not exempt: can't be frozen right now */
		if (!freezing(task) && !freezer_should_skip(task))
			num_cant_freeze_now++;
	}
	cgroup_iter_end(cgroup, &it);

	return num_cant_freeze_now ? -EBUSY : 0;
}
304
Li Zefan00c2e632008-10-29 14:00:53 -0700305static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700306{
307 struct cgroup_iter it;
308 struct task_struct *task;
309
310 cgroup_iter_start(cgroup, &it);
311 while ((task = cgroup_iter_next(cgroup, &it))) {
Li Zefan00c2e632008-10-29 14:00:53 -0700312 thaw_process(task);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700313 }
314 cgroup_iter_end(cgroup, &it);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700315
Li Zefan00c2e632008-10-29 14:00:53 -0700316 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700317}
318
/*
 * Drive the cgroup toward @goal_state (THAWED or FROZEN only; any other
 * value is a caller bug).  Refreshes the current state first so a no-op
 * write returns 0 without touching tasks.  Returns 0 on success or the
 * error from try_to_freeze_cgroup().  Caller must hold cgroup_mutex.
 */
static int freezer_change_state(struct cgroup *cgroup,
				enum freezer_state goal_state)
{
	struct freezer *freezer;
	int retval = 0;

	freezer = cgroup_freezer(cgroup);

	spin_lock_irq(&freezer->lock);

	update_freezer_state(cgroup, freezer);
	if (goal_state == freezer->state)
		goto out;

	switch (goal_state) {
	case CGROUP_THAWED:
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		BUG();
	}
out:
	spin_unlock_irq(&freezer->lock);

	return retval;
}
348
349static int freezer_write(struct cgroup *cgroup,
350 struct cftype *cft,
351 const char *buffer)
352{
353 int retval;
354 enum freezer_state goal_state;
355
Matt Helsley81dcf332008-10-18 20:27:23 -0700356 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
357 goal_state = CGROUP_THAWED;
358 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
359 goal_state = CGROUP_FROZEN;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700360 else
Li Zefan3b1b3f62008-11-12 13:26:50 -0800361 return -EINVAL;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700362
363 if (!cgroup_lock_live_group(cgroup))
364 return -ENODEV;
365 retval = freezer_change_state(cgroup, goal_state);
366 cgroup_unlock();
367 return retval;
368}
369
/* Control files exposed in each non-root freezer cgroup directory. */
static struct cftype files[] = {
	{
		.name = "state",
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
377
378static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
379{
Li Zefan3b1b3f62008-11-12 13:26:50 -0800380 if (!cgroup->parent)
381 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700382 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
383}
384
/* Freezer subsystem registration; unused callbacks are explicitly NULL. */
struct cgroup_subsys freezer_subsys = {
	.name = "freezer",
	.create = freezer_create,
	.destroy = freezer_destroy,
	.populate = freezer_populate,
	.subsys_id = freezer_subsys_id,
	.can_attach = freezer_can_attach,
	.attach = NULL,
	.fork = freezer_fork,
	.exit = NULL,
};