/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

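/* Map a cgroup to the blkio_cgroup that embeds its subsystem state. */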
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
void io_add_stat(uint64_t *stat, uint64_t add, unsigned int flags)
{
	if (flags & REQ_RW)
		stat[IO_WRITE] += add;
	else
		stat[IO_READ] += add;
	/*
	 * Everywhere in the block layer, an IO is treated as sync if it is a
	 * read or a SYNC write. We follow the same norm.
	 */
	if (!(flags & REQ_RW) || flags & REQ_RW_SYNC)
		stat[IO_SYNC] += add;
	else
		stat[IO_ASYNC] += add;
}

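/* Charge a used timeslice to the group's time statistic. */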
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

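/*
 * Account a dispatched request: bump the group's sector count and its
 * per-type (read/write, sync/async) serviced and service-bytes counters.
 */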
void blkiocg_update_request_dispatch_stats(struct blkio_group *blkg,
					   struct request *rq)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += blk_rq_sectors(rq);
	io_add_stat(stats->io_serviced, 1, rq->cmd_flags);
	io_add_stat(stats->io_service_bytes, blk_rq_sectors(rq) << 9,
		    rq->cmd_flags);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}

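/*
 * Account a completed request: service time runs from dispatch to
 * completion, wait time from request creation to dispatch.
 */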
void blkiocg_update_request_completion_stats(struct blkio_group *blkg,
					     struct request *rq)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, rq->io_start_time_ns))
		io_add_stat(stats->io_service_time, now - rq->io_start_time_ns,
			    rq->cmd_flags);
	if (time_after64(rq->io_start_time_ns, rq->start_time_ns))
		io_add_stat(stats->io_wait_time,
			    rq->io_start_time_ns - rq->start_time_ns,
			    rq->cmd_flags);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_completion_stats);

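/*
 * Link a blkio_group into a cgroup under blkcg->lock. The opaque key is
 * chosen by the IO policy and is what blkiocg_lookup_group() matches on.
 */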
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time
 * we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

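/*
 * Set the cgroup weight and propagate it to every group through each
 * registered policy's weight-update callback.
 */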
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
								 blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

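/* Writing to any of the stat files resets all stats of every group. */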
static int
blkiocg_reset_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_group_stats *stats;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
		memset(stats, 0, sizeof(struct blkio_group_stats));
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

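/* Append the stat type suffix (Read/Write/Sync/Async/Total) to disk_id. */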
void get_key_name(int type, char *disk_id, char *str, int chars_left)
{
	strlcpy(str, disk_id, chars_left);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
		       "Possibly incorrect cgroup stat display format\n");
		return;
	}
	switch (type) {
	case IO_READ:
		strlcat(str, " Read", chars_left);
		break;
	case IO_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case IO_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case IO_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case IO_TYPE_MAX:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

typedef uint64_t (get_var) (struct blkio_group *, int);

#define MAX_KEY_LEN 100
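/*
 * Emit one entry per IO type of an indexed stat, plus a read+write total
 * for this device; returns that per-device total.
 */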
uint64_t get_typed_stat(struct blkio_group *blkg, struct cgroup_map_cb *cb,
			get_var *getvar, char *disk_id)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	int type;

	for (type = 0; type < IO_TYPE_MAX; type++) {
		get_key_name(type, disk_id, key_str, MAX_KEY_LEN);
		cb->fill(cb, key_str, getvar(blkg, type));
	}
	disk_total = getvar(blkg, IO_READ) + getvar(blkg, IO_WRITE);
	get_key_name(IO_TYPE_MAX, disk_id, key_str, MAX_KEY_LEN);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

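/* Emit a single scalar stat keyed by the disk id alone. */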
uint64_t get_stat(struct blkio_group *blkg, struct cgroup_map_cb *cb,
		  get_var *getvar, char *disk_id)
{
	uint64_t var = getvar(blkg, 0);
	cb->fill(cb, disk_id, var);
	return var;
}

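/* Accessors for the stats arrays indexed by IO type. */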
#define GET_STAT_INDEXED(__VAR)						\
uint64_t get_##__VAR##_stat(struct blkio_group *blkg, int type)		\
{									\
	return blkg->stats.__VAR[type];					\
}

GET_STAT_INDEXED(io_service_bytes);
GET_STAT_INDEXED(io_serviced);
GET_STAT_INDEXED(io_service_time);
GET_STAT_INDEXED(io_wait_time);
#undef GET_STAT_INDEXED

#define GET_STAT(__VAR, __CONV)						\
uint64_t get_##__VAR##_stat(struct blkio_group *blkg, int dummy)	\
{									\
	uint64_t data = blkg->stats.__VAR;				\
	if (__CONV)							\
		data = (uint64_t)jiffies_to_msecs(data) * NSEC_PER_MSEC;\
	return data;							\
}

GET_STAT(time, 1);
GET_STAT(sectors, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
GET_STAT(dequeue, 0);
#endif
#undef GET_STAT

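/*
 * Generate a read_map handler that walks every group in the cgroup under
 * rcu and emits its stats keyed by device number.
 */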
#define SHOW_FUNCTION_PER_GROUP(__VAR, get_stats, getvar, show_total)	\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
	char disk_id[10];						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			snprintf(disk_id, 10, "%u:%u", MAJOR(blkg->dev),\
					MINOR(blkg->dev));		\
			cgroup_total += get_stats(blkg, cb, getvar,	\
						disk_id);		\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, get_stat, get_time_stat, 0);
SHOW_FUNCTION_PER_GROUP(sectors, get_stat, get_sectors_stat, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, get_typed_stat,
			get_io_service_bytes_stat, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, get_typed_stat, get_io_serviced_stat, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, get_typed_stat,
			get_io_service_time_stat, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, get_typed_stat, get_io_wait_time_stat, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, get_stat, get_dequeue_stat, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
		.write_u64 = blkiocg_reset_write,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
		.write_u64 = blkiocg_reset_write,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
		.write_u64 = blkiocg_reset_write,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
		.write_u64 = blkiocg_reset_write,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
		.write_u64 = blkiocg_reset_write,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
		.write_u64 = blkiocg_reset_write,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

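/*
 * Cgroup teardown: unhash every blkio_group from the cgroup, let each
 * registered policy unlink it, then free the blkio_cgroup itself.
 */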
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
			   blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event. Each policy registered on blkio_list (see
	 * blkio_policy_register()) gets its unlink callback invoked below.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

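/*
 * Allocate and initialise a blkio_cgroup; the root cgroup uses the
 * statically allocated blkio_root_cgroup instead.
 */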
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

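/*
 * IO controlling policies (e.g. the CFQ group scheduler) register here
 * to receive weight-update and group-unlink callbacks.
 */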
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");