blob: d2ccd2798d7ac406a103cce43a40b99a9189cfb4 [file] [log] [blame]
Matt Helsleydc52ddc2008-10-18 20:27:21 -07001/*
2 * cgroup_freezer.c - control group freezer subsystem
3 *
4 * Copyright IBM Corporation, 2007
5 *
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Matt Helsleydc52ddc2008-10-18 20:27:21 -070019#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24
/*
 * Per-cgroup freezer state.  Transitions: THAWED -> FREEZING -> FROZEN
 * (see the state diagram further down in this file).
 */
enum freezer_state {
	CGROUP_THAWED = 0,	/* all tasks runnable */
	CGROUP_FREEZING,	/* freeze requested; not every task frozen yet */
	CGROUP_FROZEN,		/* every task in the cgroup is frozen */
};
30
/* Freezer subsystem state attached to each cgroup. */
struct freezer {
	struct cgroup_subsys_state css;	/* embedded cgroup subsystem state */
	enum freezer_state state;	/* current state; see enum above */
	spinlock_t lock; /* protects _writes_ to state */
};
36
37static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39{
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43}
44
45static inline struct freezer *task_freezer(struct task_struct *task)
46{
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49}
50
51int cgroup_frozen(struct task_struct *task)
52{
53 struct freezer *freezer;
54 enum freezer_state state;
55
56 task_lock(task);
57 freezer = task_freezer(task);
58 state = freezer->state;
59 task_unlock(task);
60
Matt Helsley81dcf332008-10-18 20:27:23 -070061 return state == CGROUP_FROZEN;
Matt Helsleydc52ddc2008-10-18 20:27:21 -070062}
63
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 *
 * Indexed by enum freezer_state; order must match the enum.
 */
static const char *freezer_state_strs[] = {
	"THAWED",	/* CGROUP_THAWED */
	"FREEZING",	/* CGROUP_FREEZING */
	"FROZEN",	/* CGROUP_FROZEN */
};
73
74/*
75 * State diagram
76 * Transitions are caused by userspace writes to the freezer.state file.
77 * The values in parenthesis are state labels. The rest are edge labels.
78 *
Matt Helsley81dcf332008-10-18 20:27:23 -070079 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
80 * ^ ^ | |
81 * | \_______THAWED_______/ |
82 * \__________________________THAWED____________/
Matt Helsleydc52ddc2008-10-18 20:27:21 -070083 */
84
85struct cgroup_subsys freezer_subsys;
86
87/* Locks taken and their ordering
88 * ------------------------------
89 * css_set_lock
90 * cgroup_mutex (AKA cgroup_lock)
91 * task->alloc_lock (AKA task_lock)
92 * freezer->lock
93 * task->sighand->siglock
94 *
95 * cgroup code forces css_set_lock to be taken before task->alloc_lock
96 *
97 * freezer_create(), freezer_destroy():
98 * cgroup_mutex [ by cgroup core ]
99 *
100 * can_attach():
101 * cgroup_mutex
102 *
103 * cgroup_frozen():
104 * task->alloc_lock (to get task's cgroup)
105 *
106 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
107 * task->alloc_lock (to get task's cgroup)
108 * freezer->lock
109 * sighand->siglock (if the cgroup is freezing)
110 *
111 * freezer_read():
112 * cgroup_mutex
113 * freezer->lock
114 * read_lock css_set_lock (cgroup iterator start)
115 *
116 * freezer_write() (freeze):
117 * cgroup_mutex
118 * freezer->lock
119 * read_lock css_set_lock (cgroup iterator start)
120 * sighand->siglock
121 *
122 * freezer_write() (unfreeze):
123 * cgroup_mutex
124 * freezer->lock
125 * read_lock css_set_lock (cgroup iterator start)
126 * task->alloc_lock (to prevent races with freeze_task())
127 * sighand->siglock
128 */
129static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
130 struct cgroup *cgroup)
131{
132 struct freezer *freezer;
133
134 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
135 if (!freezer)
136 return ERR_PTR(-ENOMEM);
137
138 spin_lock_init(&freezer->lock);
Matt Helsley81dcf332008-10-18 20:27:23 -0700139 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700140 return &freezer->css;
141}
142
/* Release the freezer state allocated in freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
148
Matt Helsley957a4ee2008-10-18 20:27:22 -0700149/* Task is frozen or will freeze immediately when next it gets woken */
150static bool is_task_frozen_enough(struct task_struct *task)
151{
152 return frozen(task) ||
153 (task_is_stopped_or_traced(task) && freezing(task));
154}
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700155
/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 *
 * Returns 0 if @task (and, for @threadgroup, all its threads) may be
 * moved into @new_cgroup, or -EBUSY if anything involved is frozen.
 */
static int freezer_can_attach(struct cgroup_subsys *ss,
			      struct cgroup *new_cgroup,
			      struct task_struct *task, bool threadgroup)
{
	struct freezer *freezer;

	/*
	 * Anything frozen can't move or be moved to/from.
	 *
	 * Since orig_freezer->state == FROZEN means that @task has been
	 * frozen, so it's sufficient to check the latter condition.
	 */

	if (is_task_frozen_enough(task))
		return -EBUSY;

	/* Destination must not be FROZEN either. */
	freezer = cgroup_freezer(new_cgroup);
	if (freezer->state == CGROUP_FROZEN)
		return -EBUSY;

	/* Whole-threadgroup move: every sibling thread must be movable too. */
	if (threadgroup) {
		struct task_struct *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
			if (is_task_frozen_enough(c)) {
				rcu_read_unlock();
				return -EBUSY;
			}
		}
		rcu_read_unlock();
	}

	return 0;
}
196
/*
 * Fork hook: make a newly forked task inherit its cgroup's freezing
 * state.  If the cgroup is mid-freeze, the child is frozen as well so
 * it cannot escape the freeze by being created after the task walk.
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call.
	 */
	freezer = task_freezer(task);

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/* A fully FROZEN cgroup cannot gain tasks (can_attach forbids it). */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
224
225/*
226 * caller must hold freezer->lock
227 */
Matt Helsley1aece342008-10-18 20:27:24 -0700228static void update_freezer_state(struct cgroup *cgroup,
229 struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700230{
231 struct cgroup_iter it;
232 struct task_struct *task;
233 unsigned int nfrozen = 0, ntotal = 0;
234
235 cgroup_iter_start(cgroup, &it);
236 while ((task = cgroup_iter_next(cgroup, &it))) {
237 ntotal++;
Matt Helsley957a4ee2008-10-18 20:27:22 -0700238 if (is_task_frozen_enough(task))
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700239 nfrozen++;
240 }
241
242 /*
243 * Transition to FROZEN when no new tasks can be added ensures
244 * that we never exist in the FROZEN state while there are unfrozen
245 * tasks.
246 */
247 if (nfrozen == ntotal)
Matt Helsley81dcf332008-10-18 20:27:23 -0700248 freezer->state = CGROUP_FROZEN;
Matt Helsley1aece342008-10-18 20:27:24 -0700249 else if (nfrozen > 0)
250 freezer->state = CGROUP_FREEZING;
251 else
252 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700253 cgroup_iter_end(cgroup, &it);
254}
255
256static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
257 struct seq_file *m)
258{
259 struct freezer *freezer;
260 enum freezer_state state;
261
262 if (!cgroup_lock_live_group(cgroup))
263 return -ENODEV;
264
265 freezer = cgroup_freezer(cgroup);
266 spin_lock_irq(&freezer->lock);
267 state = freezer->state;
Matt Helsley81dcf332008-10-18 20:27:23 -0700268 if (state == CGROUP_FREEZING) {
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700269 /* We change from FREEZING to FROZEN lazily if the cgroup was
270 * only partially frozen when we exitted write. */
Matt Helsley1aece342008-10-18 20:27:24 -0700271 update_freezer_state(cgroup, freezer);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700272 state = freezer->state;
273 }
274 spin_unlock_irq(&freezer->lock);
275 cgroup_unlock();
276
277 seq_puts(m, freezer_state_strs[state]);
278 seq_putc(m, '\n');
279 return 0;
280}
281
/*
 * Ask every task in @cgroup to freeze.  Returns 0 when all tasks are
 * frozen or guaranteed to freeze shortly, -EBUSY otherwise (the cgroup
 * is then left in FREEZING; freezer_read() finishes the job lazily).
 * Caller must hold freezer->lock.
 */
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		/* freeze_task() returning false: task needs no freezing */
		if (!freeze_task(task, true))
			continue;
		if (is_task_frozen_enough(task))
			continue;
		/* Neither freezing nor allowed to skip: can't freeze it now. */
		if (!freezing(task) && !freezer_should_skip(task))
			num_cant_freeze_now++;
	}
	cgroup_iter_end(cgroup, &it);

	return num_cant_freeze_now ? -EBUSY : 0;
}
302
Li Zefan00c2e632008-10-29 14:00:53 -0700303static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700304{
305 struct cgroup_iter it;
306 struct task_struct *task;
307
308 cgroup_iter_start(cgroup, &it);
309 while ((task = cgroup_iter_next(cgroup, &it))) {
Li Zefan00c2e632008-10-29 14:00:53 -0700310 thaw_process(task);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700311 }
312 cgroup_iter_end(cgroup, &it);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700313
Li Zefan00c2e632008-10-29 14:00:53 -0700314 freezer->state = CGROUP_THAWED;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700315}
316
/*
 * Move @cgroup toward @goal_state (CGROUP_THAWED or CGROUP_FROZEN).
 * Returns 0 on success or -EBUSY when freezing could not complete yet.
 * Called with cgroup_mutex held (from freezer_write()).
 */
static int freezer_change_state(struct cgroup *cgroup,
				enum freezer_state goal_state)
{
	struct freezer *freezer;
	int retval = 0;

	freezer = cgroup_freezer(cgroup);

	spin_lock_irq(&freezer->lock);

	/* Refresh first: a lazy FREEZING -> FROZEN transition may be pending. */
	update_freezer_state(cgroup, freezer);
	if (goal_state == freezer->state)
		goto out;

	switch (goal_state) {
	case CGROUP_THAWED:
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		/* FREEZING is never a valid goal (write rejects it). */
		BUG();
	}
out:
	spin_unlock_irq(&freezer->lock);

	return retval;
}
346
347static int freezer_write(struct cgroup *cgroup,
348 struct cftype *cft,
349 const char *buffer)
350{
351 int retval;
352 enum freezer_state goal_state;
353
Matt Helsley81dcf332008-10-18 20:27:23 -0700354 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
355 goal_state = CGROUP_THAWED;
356 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
357 goal_state = CGROUP_FROZEN;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700358 else
Li Zefan3b1b3f62008-11-12 13:26:50 -0800359 return -EINVAL;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700360
361 if (!cgroup_lock_live_group(cgroup))
362 return -ENODEV;
363 retval = freezer_change_state(cgroup, goal_state);
364 cgroup_unlock();
365 return retval;
366}
367
/* Control files exported by the freezer subsystem. */
static struct cftype files[] = {
	{
		.name = "state",	/* appears as "freezer.state" */
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
375
376static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
377{
Li Zefan3b1b3f62008-11-12 13:26:50 -0800378 if (!cgroup->parent)
379 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700380 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
381}
382
/* Freezer cgroup subsystem registration with the cgroup core. */
struct cgroup_subsys freezer_subsys = {
	.name = "freezer",
	.create = freezer_create,
	.destroy = freezer_destroy,
	.populate = freezer_populate,
	.subsys_id = freezer_subsys_id,
	.can_attach = freezer_can_attach,
	.attach = NULL,		/* no per-task attach work needed */
	.fork = freezer_fork,
	.exit = NULL,		/* no exit hook needed */
};