blob: 05d52185139c520397f7e00486a98b24170a8101 [file] [log] [blame]
/*
 * cgroup_freezer.c - control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
16
Paul Gortmaker9984de12011-05-23 14:51:41 -040017#include <linux/export.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Matt Helsleydc52ddc2008-10-18 20:27:21 -070019#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24
/*
 * Per-cgroup freezer state, driven by userspace writes to the
 * freezer.state control file (see the state diagram below).
 */
enum freezer_state {
	CGROUP_THAWED = 0,	/* tasks run normally */
	CGROUP_FREEZING,	/* freeze requested; not all tasks frozen yet */
	CGROUP_FROZEN,		/* all tasks observed frozen/stopped/traced */
};
30
/* Freezer subsystem state embedded in each cgroup's subsys state. */
struct freezer {
	struct cgroup_subsys_state css;	/* must allow container_of() back to us */
	enum freezer_state state;
	spinlock_t lock; /* protects _writes_ to state */
};
36
37static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39{
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43}
44
45static inline struct freezer *task_freezer(struct task_struct *task)
46{
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49}
50
/*
 * cgroup_freezing - is @task's cgroup in the process of being (or already)
 * frozen?
 *
 * RCU protects the css lookup in task_freezer(); the state read itself is
 * unlocked and thus a racy snapshot, which callers must tolerate.
 */
bool cgroup_freezing(struct task_struct *task)
{
	enum freezer_state state;
	bool ret;

	rcu_read_lock();
	state = task_freezer(task)->state;
	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
	rcu_read_unlock();

	return ret;
}
63
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
/* Indexed by enum freezer_state; keep in sync with the enum ordering. */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
73
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED_______/                     |
 *    \__________________________THAWED____________/
 */
84
/* Forward declaration; the subsystem is defined at the bottom of this file. */
struct cgroup_subsys freezer_subsys;
86
/* Locks taken and their ordering
 * ------------------------------
 * cgroup_mutex (AKA cgroup_lock)
 * freezer->lock
 * css_set_lock
 * task->alloc_lock (AKA task_lock)
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock (fake signal delivery inside freeze_task())
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
 *    sighand->siglock
 */
/*
 * Allocate and initialise freezer state for a newly created cgroup.
 * Called by the cgroup core with cgroup_mutex held; new cgroups start
 * out THAWED.  Returns the embedded css or ERR_PTR(-ENOMEM).
 */
static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
{
	struct freezer *freezer;

	freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
	if (!freezer)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&freezer->lock);
	freezer->state = CGROUP_THAWED;
	return &freezer->css;
}
143
/*
 * Release a cgroup's freezer state.  Called by the cgroup core with
 * cgroup_mutex held.
 */
static void freezer_destroy(struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	/*
	 * A non-THAWED freezer holds a system_freezing_cnt reference taken
	 * in freezer_change_state(); drop it before freeing.
	 */
	if (freezer->state != CGROUP_THAWED)
		atomic_dec(&system_freezing_cnt);
	kfree(freezer);
}
152
/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 */
static int freezer_can_attach(struct cgroup *new_cgroup,
			      struct cgroup_taskset *tset)
{
	struct freezer *freezer;
	struct task_struct *task;

	/*
	 * Anything frozen can't move or be moved to/from.
	 */
	cgroup_taskset_for_each(task, new_cgroup, tset)
		if (cgroup_freezing(task))
			return -EBUSY;

	/* the destination must be THAWED as well */
	freezer = cgroup_freezer(new_cgroup);
	if (freezer->state != CGROUP_THAWED)
		return -EBUSY;

	return 0;
}
177
/*
 * Called on every fork so a child forked into a FREEZING cgroup is itself
 * frozen.  Cannot take cgroup_mutex (fork fast path); relies on RCU for
 * the css lookup and freezer->lock for the state check.
 */
static void freezer_fork(struct task_struct *task)
{
	struct freezer *freezer;

	rcu_read_lock();
	freezer = task_freezer(task);

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		goto out;

	spin_lock_irq(&freezer->lock);
	/*
	 * @task was forked by a member of this cgroup, so the group cannot
	 * already be fully FROZEN.
	 */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task);

	spin_unlock_irq(&freezer->lock);
out:
	rcu_read_unlock();
}
203
/*
 * Lazily promote a FREEZING cgroup to FROZEN once every member task is
 * observed frozen (or stopped/traced, which freezing cannot interrupt).
 *
 * caller must hold freezer->lock
 */
static void update_if_frozen(struct cgroup *cgroup,
			     struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int nfrozen = 0, ntotal = 0;
	enum freezer_state old_state = freezer->state;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
		/* stopped/traced tasks count as frozen while freezing is set */
		if (freezing(task) && (frozen(task) ||
				       task_is_stopped_or_traced(task)))
			nfrozen++;
	}

	if (old_state == CGROUP_THAWED) {
		BUG_ON(nfrozen > 0);
	} else if (old_state == CGROUP_FREEZING) {
		if (nfrozen == ntotal)
			freezer->state = CGROUP_FROZEN;
	} else { /* old_state == CGROUP_FROZEN */
		BUG_ON(nfrozen != ntotal);
	}

	cgroup_iter_end(cgroup, &it);
}
234
/*
 * Show the cgroup's current freezer state ("THAWED"/"FREEZING"/"FROZEN")
 * in the freezer.state file.  Returns 0, or -ENODEV if the cgroup is dead.
 */
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
			struct seq_file *m)
{
	struct freezer *freezer;
	enum freezer_state state;

	if (!cgroup_lock_live_group(cgroup))
		return -ENODEV;

	freezer = cgroup_freezer(cgroup);
	spin_lock_irq(&freezer->lock);
	state = freezer->state;
	if (state == CGROUP_FREEZING) {
		/* We change from FREEZING to FROZEN lazily if the cgroup was
		 * only partially frozen when we exited write. */
		update_if_frozen(cgroup, freezer);
		state = freezer->state;
	}
	spin_unlock_irq(&freezer->lock);
	cgroup_unlock();

	seq_puts(m, freezer_state_strs[state]);
	seq_putc(m, '\n');
	return 0;
}
260
/*
 * Request the freezing of every task in @cgroup.  Tasks enter the
 * refrigerator asynchronously; update_if_frozen() later detects
 * completion.  Caller holds freezer->lock.
 */
static void freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it)))
		freeze_task(task);
	cgroup_iter_end(cgroup, &it);
}
271
/*
 * Wake every task in @cgroup out of the refrigerator.  Caller holds
 * freezer->lock (state was already set back to THAWED).
 */
static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it)))
		__thaw_task(task);
	cgroup_iter_end(cgroup, &it);
}
282
/*
 * Transition @cgroup towards @goal_state (THAWED or FROZEN only; a freeze
 * request lands in FREEZING and is lazily promoted).  Also maintains the
 * global system_freezing_cnt so freezing() stays cheap when no cgroup is
 * freezing.  Caller holds cgroup_mutex.
 */
static void freezer_change_state(struct cgroup *cgroup,
				 enum freezer_state goal_state)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	spin_lock_irq(&freezer->lock);

	update_if_frozen(cgroup, freezer);

	switch (goal_state) {
	case CGROUP_THAWED:
		/* drop the system_freezing_cnt ref taken when we left THAWED */
		if (freezer->state != CGROUP_THAWED)
			atomic_dec(&system_freezing_cnt);
		freezer->state = CGROUP_THAWED;
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		if (freezer->state == CGROUP_THAWED)
			atomic_inc(&system_freezing_cnt);
		freezer->state = CGROUP_FREEZING;
		freeze_cgroup(cgroup, freezer);
		break;
	default:
		BUG();
	}

	spin_unlock_irq(&freezer->lock);
}
311
312static int freezer_write(struct cgroup *cgroup,
313 struct cftype *cft,
314 const char *buffer)
315{
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700316 enum freezer_state goal_state;
317
Matt Helsley81dcf332008-10-18 20:27:23 -0700318 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
319 goal_state = CGROUP_THAWED;
320 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
321 goal_state = CGROUP_FROZEN;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700322 else
Li Zefan3b1b3f62008-11-12 13:26:50 -0800323 return -EINVAL;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700324
325 if (!cgroup_lock_live_group(cgroup))
326 return -ENODEV;
Tejun Heo51f246e2012-10-16 15:03:14 -0700327 freezer_change_state(cgroup, goal_state);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700328 cgroup_unlock();
Tejun Heo51f246e2012-10-16 15:03:14 -0700329 return 0;
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700330}
331
/* Control files exposed by the freezer subsystem. */
static struct cftype files[] = {
	{
		.name = "state",
		/* root cgroup is not freezable, so no file there */
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
	{ }	/* terminate */
};
341
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.subsys_id	= freezer_subsys_id,
	.can_attach	= freezer_can_attach,
	.fork		= freezer_fork,
	.base_cftypes	= files,

	/*
	 * freezer subsys doesn't handle hierarchy at all. Frozen state
	 * should be inherited through the hierarchy - if a parent is
	 * frozen, all its children should be frozen. Fix it and remove
	 * the following.
	 */
	.broken_hierarchy = true,
};