/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

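/* Must be called with blkcg->lock held (as its sibling helpers below are) */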
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
			   bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat counter, depending on the request type,
 * and BUG()s if the counter is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

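/*
 * The stats below exist only under CONFIG_DEBUG_BLK_CGROUP. Roughly:
 * group_wait_time is how long a group waited for a timeslice while it had
 * requests queued, idle_time is how long the scheduler idled on the group
 * hoping for a better request, and empty_time is how long the group had
 * no requests queued at all.
 */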
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_request_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

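/*
 * Charge the group for disk time it actually used; the IO scheduler calls
 * this when a timeslice for the group completes. The sum is exposed to
 * userspace via the blkio.time file.
 */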
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_set_start_empty_time(struct blkio_group *blkg, bool ignore)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * If ignore is set, we do not panic on the empty flag being set
	 * already. This avoids cases where superfluous timeslice-complete
	 * events (e.g., forced_dispatch in CFQ) arrive when no IO has been
	 * served, which would otherwise trigger the empty check incorrectly.
	 */
	BUG_ON(!ignore && blkio_blkg_empty(stats));
	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

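/*
 * Account a request dispatched to the device: bytes >> 9 converts the
 * request size to 512-byte sectors, and the SERVICED/SERVICE_BYTES
 * arrays are bumped per direction and sync/async class.
 */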
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

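/*
 * Link a group into a cgroup. key is an opaque per-queue cookie supplied
 * by the IO policy (CFQ, for instance, passes its cfq_data pointer) and
 * is what blkiocg_lookup_group() later matches on.
 */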
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Do we need to take a css reference here? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list; otherwise
 * returns 1, indicating that the blkio_group was already unhashed by the
 * time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* Must be called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

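/*
 * Writing to blkio.weight updates the cgroup's default weight. The new
 * value is pushed to every group in the cgroup except those that carry a
 * per-device override (a blkio_policy_node for blkg->dev).
 */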
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev);

		if (pn)
			continue;

		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

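/*
 * Writing to blkio.reset_stats zeroes every stat for every group in the
 * cgroup, but preserves the in-flight QUEUED counters and the
 * idling/waiting/empty state flags (restarting their clocks at "now") so
 * that accounting for requests already in the system stays consistent.
 */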
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

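/*
 * Build the key printed for each stat row: "<major>:<minor>" plus, unless
 * diskname_only is set, the sub-type suffix. A serviced-bytes row might
 * look like, e.g., "8:16 Write 4096".
 */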
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

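/*
 * Generates the read handler for each per-group stat file: one row per
 * device (and per sub-type for array stats), plus an overall "Total" row
 * when show_total is set.
 */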
#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0);
SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

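/*
 * Sanity check: the device number must refer to an existing whole disk,
 * not a partition.
 */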
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}

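/*
 * Parse a "major:minor weight" rule. As a rough usage sketch from
 * userspace (the device numbers are illustrative):
 *
 *	echo "8:16 300" > blkio.weight_device	# weight 300 for device 8:16
 *	echo "8:16 0"   > blkio.weight_device	# remove the rule
 */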
static int blkio_policy_parse_and_set(char *buf,
				      struct blkio_policy_node *newpn)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp;
	int i = 0;
	dev_t dev;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent the input from carrying too many fields */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		return -EINVAL;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	ret = strict_strtoul(s[1], 10, &temp);
	if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
	    temp > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	newpn->weight = temp;

	return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev);
	if (pn)
		return pn->weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

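/*
 * Handle a write to blkio.weight_device. A weight of 0 deletes the
 * per-device rule; otherwise the rule is inserted (or updated in place)
 * and the new effective weight is pushed to the matching groups of every
 * registered policy.
 */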
static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	int keep_newpn = 0;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev);
	if (!pn) {
		if (newpn->weight != 0) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (newpn->weight == 0) {
		/* weight == 0 means deleting a specific weight */
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	pn->weight = newpn->weight;

update_io_group:
	/* update weight for each cfqg */
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (newpn->dev == blkg->dev) {
			list_for_each_entry(blkiop, &blkio_list, list)
				blkiop->ops.blkio_update_group_weight_fn(blkg,
					newpn->weight ?
						newpn->weight :
						blkcg->weight);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
				      struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	struct blkio_policy_node *pn;

	seq_printf(m, "dev\tweight\n");

	blkcg = cgroup_to_blkio_cgroup(cgrp);
	if (list_empty(&blkcg->policy_list))
		goto out;

	spin_lock_irq(&blkcg->lock);
	list_for_each_entry(pn, &blkcg->policy_list, node) {
		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
			   MINOR(pn->dev), pn->weight);
	}
	spin_unlock_irq(&blkcg->lock);

out:
	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = blkiocg_weight_device_read,
		.write_string = blkiocg_weight_device_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "group_wait_time",
		.read_map = blkiocg_group_wait_time_read,
	},
	{
		.name = "idle_time",
		.read_map = blkiocg_idle_time_read,
	},
	{
		.name = "empty_time",
		.read_map = blkiocg_empty_time_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
			   blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked because the associated cgroup
	 * is going away. Let all the IO controlling policies know about
	 * this event.
	 *
	 * Currently this is a static call to one IO controlling policy.
	 * Once we have more policies in place, we will need some dynamic
	 * registration of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;

done:
	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchies deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means of supporting
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

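/*
 * IO policies (e.g. CFQ's group scheduling) register here so that weight
 * changes and group unlinking can be propagated to them through the ops
 * in struct blkio_policy_type.
 */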
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");