/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

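/*
 * Note on the bucketing above: every request lands in exactly two of the
 * sub-type buckets, one of READ/WRITE and one of SYNC/ASYNC. For example,
 * dispatching a 4KB synchronous write bumps both the BLKIO_STAT_WRITE and
 * BLKIO_STAT_SYNC entries of BLKIO_STAT_SERVICE_BYTES by 4096. The "Total"
 * reported to userspace is therefore computed as READ + WRITE (see
 * blkio_get_stat()), not as the sum of all four buckets.
 */
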
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_set_active_queue_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_active_queue_stats);
#endif

void blkiocg_update_request_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_add_stats);

void blkiocg_update_request_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_request_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

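/*
 * Timeline for the two deltas above (sched_clock() nanoseconds, timestamps
 * as recorded by the caller):
 *
 *	start_time	- request was queued with the scheduler
 *	io_start_time	- request was dispatched to the device
 *	now		- request completed
 *
 * io_start_time - start_time is accounted as wait time and
 * now - io_start_time as service time. E.g. a request queued at t=10us,
 * dispatched at t=30us and completed at t=90us adds 20us of wait time and
 * 60us of service time to its direction/sync buckets.
 */
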
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if blkio_group was still on the cgroup list. Otherwise returns 1,
 * indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

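/*
 * Typical usage by an IO control policy (a sketch, not lifted from any
 * particular scheduler): the policy embeds a struct blkio_group in its own
 * per-cgroup, per-queue data and uses a pointer to its per-queue data as
 * the opaque key, e.g.:
 *
 *	blkio_group_init(&pd->blkg);
 *	blkiocg_add_blkio_group(blkcg, &pd->blkg, pd->queue_data, dev);
 *	...
 *	blkg = blkiocg_lookup_group(blkcg, pd->queue_data);
 *
 * "pd" and "queue_data" above are hypothetical names; the only contract is
 * that the same key is passed to both calls.
 */
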
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

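/*
 * For reference, SHOW_FUNCTION(weight) above expands to roughly:
 *
 *	static u64 blkiocg_weight_read(struct cgroup *cgroup,
 *				       struct cftype *cftype)
 *	{
 *		struct blkio_cgroup *blkcg;
 *
 *		blkcg = cgroup_to_blkio_cgroup(cgroup);
 *		return (u64)blkcg->weight;
 *	}
 *
 * which is the read_u64 handler wired up for the "weight" file below.
 */
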
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

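/*
 * From userspace the handler above is reached by writing to the per-cgroup
 * "blkio.weight" file, e.g. (assuming the blkio controller is mounted at
 * /cgroup):
 *
 *	echo 500 > /cgroup/mygroup/blkio.weight
 *
 * Values outside [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] are rejected with
 * -EINVAL; an accepted value is pushed to every registered policy for every
 * group in the cgroup.
 */
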
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		/*
		 * Preserve the in-flight queued counts across the reset;
		 * requests already queued will be decremented on dispatch,
		 * and zeroing them here would trip the BUG_ON() in
		 * blkio_check_and_dec_stat().
		 */
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i];
		memset(&blkg->stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			blkg->stats.stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

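/*
 * The resulting keys are "major:minor", optionally followed by a sub-type.
 * For a device with dev_t 8:16 this produces "8:16 Read", "8:16 Write",
 * "8:16 Sync", "8:16 Async" or "8:16 Total", and just "8:16" when
 * diskname_only is true. These keys become the first column of the
 * read_map style cgroup files below.
 */
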
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP

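/*
 * Each instantiation above generates a read_map handler named
 * blkiocg_<name>_read(). For example, SHOW_FUNCTION_PER_GROUP(io_serviced,
 * BLKIO_STAT_SERVICED, 1) defines blkiocg_io_serviced_read(), which walks
 * every blkio_group in the cgroup, emits one "major:minor <sub-type>" row
 * per group via blkio_get_stat(), and (because show_total is 1) finishes
 * with a cgroup-wide "Total" row. These handlers are hooked up in the
 * blkio_files[] table below.
 */
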
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "io_queued",
		.read_map = blkiocg_io_queued_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_map = blkiocg_avg_queue_size_read,
	},
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked because the associated cgroup
	 * is going away. Let all the IO controlling policies know about
	 * this event.
	 *
	 * Currently this is a static call to one IO controlling policy.
	 * Once we have more policies in place, we need some dynamic
	 * registration of callback functions.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

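/*
 * Registration sketch for an IO control policy (illustrative only; the
 * callback names are hypothetical, CFQ provides the real implementation):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 *	...
 *	blkio_policy_unregister(&blkio_policy_foo);
 *
 * Once registered, the policy's callbacks are invoked from
 * blkiocg_weight_write() and blkiocg_destroy() above for every group in
 * the affected cgroup.
 */
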
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");