/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

void blkio_group_init(struct blkio_group *blkg)
{
	spin_lock_init(&blkg->stats_lock);
}
EXPORT_SYMBOL_GPL(blkio_group_init);

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
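/*
 * Illustration (not from the original source): every update lands in
 * exactly one of the READ/WRITE slots and one of the SYNC/ASYNC slots,
 * so for any stat array the READ + WRITE sum always equals the
 * SYNC + ASYNC sum. E.g. blkio_add_stat(stat, 4096, false, true) adds
 * 4096 to both stat[BLKIO_STAT_READ] and stat[BLKIO_STAT_SYNC].
 */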

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

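/*
 * Usage sketch (hypothetical caller, not part of this file): an IO
 * policy dispatching a 4KB synchronous read to the device would call
 *
 *	blkiocg_update_dispatch_stats(blkg, 4096, false, true);
 *
 * which adds 8 sectors (4096 >> 9), one BLKIO_STAT_SERVICED request
 * and 4096 BLKIO_STAT_SERVICE_BYTES, all under blkg->stats_lock.
 */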
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

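/*
 * A request's life in sched_clock() time: start_time is when it was
 * queued, io_start_time is when it was dispatched to the device, and
 * completion is now. Hence wait time = io_start_time - start_time and
 * service time = now - io_start_time; the time_after64() checks below
 * simply skip a sample whose clock readings are not monotonic.
 */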
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

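/*
 * Hash @blkg into @blkcg's group list under the policy-chosen @key and
 * record the device number used as the prefix for stat keys. Publishing
 * the key with rcu_assign_pointer() pairs with the rcu_dereference() in
 * readers such as blkiocg_destroy() below.
 */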
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

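/* Must be called with blkcg->lock held; both callers below take it. */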
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time
 * we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
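/*
 * Usage sketch (hypothetical policy code): the key is the opaque cookie
 * the policy passed to blkiocg_add_blkio_group(), typically a pointer
 * to its per-queue data, so the lookup is a plain pointer comparison:
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, my_queue_data);
 *	rcu_read_unlock();
 */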

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
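/* The instantiation above expands to blkiocg_weight_read(), the
 * read_u64 handler wired into blkio_files[] below. */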

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
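/*
 * From userspace (illustrative; mount point assumed): writing
 *
 *	echo 500 > /cgroup/blkio/mygrp/blkio.weight
 *
 * stores the new weight and immediately notifies every registered
 * policy for each of the cgroup's blkio_groups; values outside
 * [BLKIO_WEIGHT_MIN, BLKIO_WEIGHT_MAX] get -EINVAL.
 */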

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_group_stats *stats;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
		memset(stats, 0, sizeof(struct blkio_group_stats));
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
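/*
 * For a device numbered 8:16 this yields keys such as "8:16 Read",
 * "8:16 Write", ..., or the bare "8:16" when diskname_only is set.
 */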

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
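/*
 * Reading e.g. blkio.io_service_bytes for one group therefore emits
 * one line per key (values illustrative):
 *
 *	8:16 Read 1048576
 *	8:16 Write 0
 *	8:16 Sync 1048576
 *	8:16 Async 0
 *	8:16 Total 1048576
 */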

#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total)		\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
		struct cftype *cftype, struct cgroup_map_cb *cb)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
	uint64_t cgroup_total = 0;					\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev) {					\
			spin_lock_irq(&blkg->stats_lock);		\
			cgroup_total += blkio_get_stat(blkg, cb,	\
						blkg->dev, type);	\
			spin_unlock_irq(&blkg->stats_lock);		\
		}							\
	}								\
	if (show_total)							\
		cb->fill(cb, "Total", cgroup_total);			\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0);
SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0);
SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1);
SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1);
SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1);
SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0);
#endif
#undef SHOW_FUNCTION_PER_GROUP
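/* Each instantiation above defines one read_map handler, e.g.
 * blkiocg_time_read() or blkiocg_io_merged_read(); show_total controls
 * whether the cgroup-wide "Total" line is appended. */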

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#endif

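/* Each entry below appears in the cgroup filesystem as "blkio.<name>",
 * e.g. blkio.weight or blkio.reset_stats. */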
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_map = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_map = blkiocg_sectors_read,
	},
	{
		.name = "io_service_bytes",
		.read_map = blkiocg_io_service_bytes_read,
	},
	{
		.name = "io_serviced",
		.read_map = blkiocg_io_serviced_read,
	},
	{
		.name = "io_service_time",
		.read_map = blkiocg_io_service_time_read,
	},
	{
		.name = "io_wait_time",
		.read_map = blkiocg_io_wait_time_read,
	},
	{
		.name = "io_merged",
		.read_map = blkiocg_io_merged_read,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_map = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event: each policy registered on blkio_list gets its
	 * blkio_unlink_group_fn() callback.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchies deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
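/*
 * Registration sketch (hypothetical policy; names invented for
 * illustration): an IO scheduler supplies the callbacks used above and
 * registers itself at module init, unregistering at exit:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn = foo_unlink_blkio_group,
 *			.blkio_update_group_weight_fn = foo_update_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 *	...
 *	blkio_policy_unregister(&blkio_policy_foo);
 */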

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");