/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

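/*
 * Pin the blkio cgroup by taking a reference on its css. Returns false
 * if the css is already on its way to being destroyed and the reference
 * could not be taken. Paired with blkiocg_css_put() below.
 */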
bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
{
	if (!css_tryget(&blkcg->css))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(blkiocg_css_tryget);

void blkiocg_css_put(struct blkio_cgroup *blkcg)
{
	css_put(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_put);

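/* Map a generic cgroup to its blkio-specific state via the subsys css. */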
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

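/*
 * Accumulate the disk time consumed and sectors transferred by this
 * group. These counters back the per-device "time" and "sectors" cgroup
 * files defined in blkio_files[] below.
 */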
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

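/*
 * Link a policy's per-cgroup group structure into the cgroup under
 * blkcg->lock. @key is an opaque, policy-chosen pointer that
 * blkiocg_lookup_group() later compares against; @dev identifies the
 * device in the stats output. An illustrative call from an IO
 * scheduler's group-setup path (hypothetical names):
 *
 *	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 0);
 */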
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time
 * we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

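/*
 * Generate a read_u64 handler reporting a scalar blkio_cgroup field;
 * instantiated once below, for "weight".
 */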
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

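/*
 * Write handler for the "weight" file: validate the new value, store it
 * in the cgroup, and push it to every blkio_group in the cgroup through
 * each registered policy's update callback. blkio_list_lock is taken
 * outside blkcg->lock so the policy list cannot change mid-update.
 */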
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

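/*
 * Generate a seq_file read handler that walks the cgroup's groups under
 * RCU and emits one "major:minor <value>" line per device.
 */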
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				MINOR(blkg->dev), blkg->__VAR);		\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

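/*
 * Control files created in each blkio cgroup directory: "weight" is
 * read-write; "time", "sectors" and (under CONFIG_DEBUG_BLK_CGROUP)
 * "dequeue" are read-only per-device statistics. A sketch of typical
 * usage from userspace, assuming the controller is mounted at /cgroup
 * (values illustrative):
 *
 *	# echo 500 > /cgroup/test/blkio.weight
 *	# cat /cgroup/test/blkio.sectors
 *	8:16 102400
 */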
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

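/*
 * Called when the cgroup goes away. Repeatedly unhash the first
 * remaining blkio_group under blkcg->lock, then, with the lock dropped,
 * notify every registered policy; the RCU read lock keeps blkg and its
 * key valid across the unlocked window.
 */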
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the registered IO controlling policies know
	 * about this event.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means of supporting
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};

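/*
 * IO controlling policies (e.g. the proportional-weight logic in CFQ)
 * register here; registered policies receive the weight-update and
 * group-unlink callbacks invoked above.
 */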
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);