blob: e5c0244962b020ab3cd4e4d00a0be04e5cc18af5 [file] [log] [blame]
/*
 * cgroup_freezer.c - control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
16
17#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Matt Helsleydc52ddc2008-10-18 20:27:21 -070019#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24
/* Userspace-visible freeze state of a cgroup; order matches freezer_state_strs. */
enum freezer_state {
	CGROUP_THAWED = 0,
	CGROUP_FREEZING,
	CGROUP_FROZEN,
};
30
31struct freezer {
32 struct cgroup_subsys_state css;
33 enum freezer_state state;
34 spinlock_t lock; /* protects _writes_ to state */
35};
36
37static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39{
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43}
44
45static inline struct freezer *task_freezer(struct task_struct *task)
46{
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49}
50
Matt Helsley5a7aadf2010-03-26 23:51:44 +010051int cgroup_freezing_or_frozen(struct task_struct *task)
Matt Helsleydc52ddc2008-10-18 20:27:21 -070052{
53 struct freezer *freezer;
54 enum freezer_state state;
55
56 task_lock(task);
57 freezer = task_freezer(task);
Matt Helsley5a7aadf2010-03-26 23:51:44 +010058 if (!freezer->css.cgroup->parent)
59 state = CGROUP_THAWED; /* root cgroup can't be frozen */
60 else
61 state = freezer->state;
Matt Helsleydc52ddc2008-10-18 20:27:21 -070062 task_unlock(task);
63
Matt Helsley5a7aadf2010-03-26 23:51:44 +010064 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
Matt Helsleydc52ddc2008-10-18 20:27:21 -070065}
66
/*
 * State names shown through the freezer.state control file, indexed by
 * enum freezer_state.  cgroups_write_string() limits the size of freezer
 * state strings to CGROUP_LOCAL_BUFFER_SIZE.
 */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
76
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED______/                      |
 *    \__________________________THAWED___________/
 */
87
88struct cgroup_subsys freezer_subsys;
89
/* Locks taken and their ordering
 * ------------------------------
 * css_set_lock
 * cgroup_mutex (AKA cgroup_lock)
 * task->alloc_lock (AKA task_lock)
 * freezer->lock
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * can_attach():
 * cgroup_mutex
 *
 * cgroup_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * task->alloc_lock (to get task's cgroup)
 * freezer->lock
 * sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 * freezer->lock
 * read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 * freezer->lock
 * read_lock css_set_lock (cgroup iterator start)
 * sighand->siglock
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 * freezer->lock
 * read_lock css_set_lock (cgroup iterator start)
 * task->alloc_lock (to prevent races with freeze_task())
 * sighand->siglock
 */
132static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
133 struct cgroup *cgroup)
134{
135 struct freezer *freezer;
136
137 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
138 if (!freezer)
139 return ERR_PTR(-ENOMEM);
140
141 spin_lock_init(&freezer->lock);
Matt Helsley81dcf332008-10-18 20:27:23 -0700142 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700143 return &freezer->css;
144}
145
/* Free the per-cgroup freezer allocated by freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
151
Matt Helsley957a4ee2008-10-18 20:27:22 -0700152/* Task is frozen or will freeze immediately when next it gets woken */
153static bool is_task_frozen_enough(struct task_struct *task)
154{
155 return frozen(task) ||
156 (task_is_stopped_or_traced(task) && freezing(task));
157}
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700158
Matt Helsley957a4ee2008-10-18 20:27:22 -0700159/*
160 * The call to cgroup_lock() in the freezer.state write method prevents
161 * a write to that file racing against an attach, and hence the
162 * can_attach() result will remain valid until the attach completes.
163 */
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700164static int freezer_can_attach(struct cgroup_subsys *ss,
165 struct cgroup *new_cgroup,
Ben Blumbe367d02009-09-23 15:56:31 -0700166 struct task_struct *task, bool threadgroup)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700167{
168 struct freezer *freezer;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700169
Li Zefan80a6a2c2008-10-29 14:00:52 -0700170 /*
171 * Anything frozen can't move or be moved to/from.
172 *
173 * Since orig_freezer->state == FROZEN means that @task has been
174 * frozen, so it's sufficient to check the latter condition.
175 */
Matt Helsley957a4ee2008-10-18 20:27:22 -0700176
177 if (is_task_frozen_enough(task))
178 return -EBUSY;
179
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700180 freezer = cgroup_freezer(new_cgroup);
Matt Helsley81dcf332008-10-18 20:27:23 -0700181 if (freezer->state == CGROUP_FROZEN)
Matt Helsley957a4ee2008-10-18 20:27:22 -0700182 return -EBUSY;
183
Ben Blumbe367d02009-09-23 15:56:31 -0700184 if (threadgroup) {
185 struct task_struct *c;
186
187 rcu_read_lock();
188 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
189 if (is_task_frozen_enough(c)) {
190 rcu_read_unlock();
191 return -EBUSY;
192 }
193 }
194 rcu_read_unlock();
195 }
196
Li Zefan80a6a2c2008-10-29 14:00:52 -0700197 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700198}
199
200static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
201{
202 struct freezer *freezer;
203
Li Zefan68744672008-11-12 13:26:49 -0800204 /*
205 * No lock is needed, since the task isn't on tasklist yet,
206 * so it can't be moved to another cgroup, which means the
207 * freezer won't be removed and will be valid during this
Paul E. McKenney8b46f882010-04-21 13:02:08 -0700208 * function call. Nevertheless, apply RCU read-side critical
209 * section to suppress RCU lockdep false positives.
Li Zefan68744672008-11-12 13:26:49 -0800210 */
Paul E. McKenney8b46f882010-04-21 13:02:08 -0700211 rcu_read_lock();
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700212 freezer = task_freezer(task);
Paul E. McKenney8b46f882010-04-21 13:02:08 -0700213 rcu_read_unlock();
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700214
Li Zefan3b1b3f62008-11-12 13:26:50 -0800215 /*
216 * The root cgroup is non-freezable, so we can skip the
217 * following check.
218 */
219 if (!freezer->css.cgroup->parent)
220 return;
221
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700222 spin_lock_irq(&freezer->lock);
Li Zefan7ccb9742008-10-29 14:00:51 -0700223 BUG_ON(freezer->state == CGROUP_FROZEN);
224
Matt Helsley81dcf332008-10-18 20:27:23 -0700225 /* Locking avoids race with FREEZING -> THAWED transitions. */
226 if (freezer->state == CGROUP_FREEZING)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700227 freeze_task(task, true);
228 spin_unlock_irq(&freezer->lock);
229}
230
231/*
232 * caller must hold freezer->lock
233 */
Matt Helsley1aece342008-10-18 20:27:24 -0700234static void update_freezer_state(struct cgroup *cgroup,
235 struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700236{
237 struct cgroup_iter it;
238 struct task_struct *task;
239 unsigned int nfrozen = 0, ntotal = 0;
240
241 cgroup_iter_start(cgroup, &it);
242 while ((task = cgroup_iter_next(cgroup, &it))) {
243 ntotal++;
Matt Helsley957a4ee2008-10-18 20:27:22 -0700244 if (is_task_frozen_enough(task))
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700245 nfrozen++;
246 }
247
248 /*
249 * Transition to FROZEN when no new tasks can be added ensures
250 * that we never exist in the FROZEN state while there are unfrozen
251 * tasks.
252 */
253 if (nfrozen == ntotal)
Matt Helsley81dcf332008-10-18 20:27:23 -0700254 freezer->state = CGROUP_FROZEN;
Matt Helsley1aece342008-10-18 20:27:24 -0700255 else if (nfrozen > 0)
256 freezer->state = CGROUP_FREEZING;
257 else
258 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700259 cgroup_iter_end(cgroup, &it);
260}
261
262static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
263 struct seq_file *m)
264{
265 struct freezer *freezer;
266 enum freezer_state state;
267
268 if (!cgroup_lock_live_group(cgroup))
269 return -ENODEV;
270
271 freezer = cgroup_freezer(cgroup);
272 spin_lock_irq(&freezer->lock);
273 state = freezer->state;
Matt Helsley81dcf332008-10-18 20:27:23 -0700274 if (state == CGROUP_FREEZING) {
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700275 /* We change from FREEZING to FROZEN lazily if the cgroup was
276 * only partially frozen when we exitted write. */
Matt Helsley1aece342008-10-18 20:27:24 -0700277 update_freezer_state(cgroup, freezer);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700278 state = freezer->state;
279 }
280 spin_unlock_irq(&freezer->lock);
281 cgroup_unlock();
282
283 seq_puts(m, freezer_state_strs[state]);
284 seq_putc(m, '\n');
285 return 0;
286}
287
288static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
289{
290 struct cgroup_iter it;
291 struct task_struct *task;
292 unsigned int num_cant_freeze_now = 0;
293
Matt Helsley81dcf332008-10-18 20:27:23 -0700294 freezer->state = CGROUP_FREEZING;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700295 cgroup_iter_start(cgroup, &it);
296 while ((task = cgroup_iter_next(cgroup, &it))) {
297 if (!freeze_task(task, true))
298 continue;
Matt Helsley957a4ee2008-10-18 20:27:22 -0700299 if (is_task_frozen_enough(task))
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700300 continue;
301 if (!freezing(task) && !freezer_should_skip(task))
302 num_cant_freeze_now++;
303 }
304 cgroup_iter_end(cgroup, &it);
305
306 return num_cant_freeze_now ? -EBUSY : 0;
307}
308
Li Zefan00c2e632008-10-29 14:00:53 -0700309static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700310{
311 struct cgroup_iter it;
312 struct task_struct *task;
313
314 cgroup_iter_start(cgroup, &it);
315 while ((task = cgroup_iter_next(cgroup, &it))) {
Li Zefan00c2e632008-10-29 14:00:53 -0700316 thaw_process(task);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700317 }
318 cgroup_iter_end(cgroup, &it);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700319
Li Zefan00c2e632008-10-29 14:00:53 -0700320 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700321}
322
323static int freezer_change_state(struct cgroup *cgroup,
324 enum freezer_state goal_state)
325{
326 struct freezer *freezer;
327 int retval = 0;
328
329 freezer = cgroup_freezer(cgroup);
Li Zefan51308ee2008-10-29 14:00:54 -0700330
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700331 spin_lock_irq(&freezer->lock);
Li Zefan51308ee2008-10-29 14:00:54 -0700332
Matt Helsley1aece342008-10-18 20:27:24 -0700333 update_freezer_state(cgroup, freezer);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700334 if (goal_state == freezer->state)
335 goto out;
Li Zefan51308ee2008-10-29 14:00:54 -0700336
337 switch (goal_state) {
Matt Helsley81dcf332008-10-18 20:27:23 -0700338 case CGROUP_THAWED:
Li Zefan00c2e632008-10-29 14:00:53 -0700339 unfreeze_cgroup(cgroup, freezer);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700340 break;
Li Zefan51308ee2008-10-29 14:00:54 -0700341 case CGROUP_FROZEN:
342 retval = try_to_freeze_cgroup(cgroup, freezer);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700343 break;
Li Zefan51308ee2008-10-29 14:00:54 -0700344 default:
345 BUG();
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700346 }
347out:
348 spin_unlock_irq(&freezer->lock);
349
350 return retval;
351}
352
353static int freezer_write(struct cgroup *cgroup,
354 struct cftype *cft,
355 const char *buffer)
356{
357 int retval;
358 enum freezer_state goal_state;
359
Matt Helsley81dcf332008-10-18 20:27:23 -0700360 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
361 goal_state = CGROUP_THAWED;
362 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
363 goal_state = CGROUP_FROZEN;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700364 else
Li Zefan3b1b3f62008-11-12 13:26:50 -0800365 return -EINVAL;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700366
367 if (!cgroup_lock_live_group(cgroup))
368 return -ENODEV;
369 retval = freezer_change_state(cgroup, goal_state);
370 cgroup_unlock();
371 return retval;
372}
373
374static struct cftype files[] = {
375 {
376 .name = "state",
377 .read_seq_string = freezer_read,
378 .write_string = freezer_write,
379 },
380};
381
382static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
383{
Li Zefan3b1b3f62008-11-12 13:26:50 -0800384 if (!cgroup->parent)
385 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700386 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
387}
388
389struct cgroup_subsys freezer_subsys = {
390 .name = "freezer",
391 .create = freezer_create,
392 .destroy = freezer_destroy,
393 .populate = freezer_populate,
394 .subsys_id = freezer_subsys_id,
395 .can_attach = freezer_can_attach,
396 .attach = NULL,
397 .fork = freezer_fork,
398 .exit = NULL,
399};