/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
{
	if (!css_tryget(&blkcg->css))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(blkiocg_css_tryget);

void blkiocg_css_put(struct blkio_cgroup *blkcg)
{
	css_put(&blkcg->css);
}
EXPORT_SYMBOL_GPL(blkiocg_css_put);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
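
/*
 * Illustrative sketch, not part of this file: an IO policy would
 * typically resolve the blkio_cgroup of the current task like this,
 * under rcu_read_lock():
 *
 *	struct cgroup *cgroup = task_cgroup(current, blkio_subsys_id);
 *	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 */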

void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
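
/*
 * Illustrative sketch, not part of this file: a policy embeds a
 * struct blkio_group in its own per-cgroup structure and hashes it
 * here, using its scheduler data pointer as the lookup key. With
 * hypothetical CFQ-style names cfqd/cfqg this would look like:
 *
 *	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 *				MKDEV(major, minor));
 */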
Vivek Goyal31e4c282009-12-03 12:59:42 -050071
Vivek Goyalb1c35762009-12-03 12:59:47 -050072static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
73{
74 hlist_del_init_rcu(&blkg->blkcg_node);
75 blkg->blkcg_id = 0;
76}
77
78/*
79 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
80 * indicating that blk_group was unhashed by the time we got to it.
81 */
Vivek Goyal31e4c282009-12-03 12:59:42 -050082int blkiocg_del_blkio_group(struct blkio_group *blkg)
83{
Vivek Goyalb1c35762009-12-03 12:59:47 -050084 struct blkio_cgroup *blkcg;
85 unsigned long flags;
86 struct cgroup_subsys_state *css;
87 int ret = 1;
88
89 rcu_read_lock();
90 css = css_lookup(&blkio_subsys, blkg->blkcg_id);
91 if (!css)
92 goto out;
93
94 blkcg = container_of(css, struct blkio_cgroup, css);
95 spin_lock_irqsave(&blkcg->lock, flags);
96 if (!hlist_unhashed(&blkg->blkcg_node)) {
97 __blkiocg_del_blkio_group(blkg);
98 ret = 0;
99 }
100 spin_unlock_irqrestore(&blkcg->lock, flags);
101out:
102 rcu_read_unlock();
103 return ret;
Vivek Goyal31e4c282009-12-03 12:59:42 -0500104}
Vivek Goyal9d6a9862009-12-04 10:36:41 -0500105EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
Vivek Goyal31e4c282009-12-03 12:59:42 -0500106
107/* called under rcu_read_lock(). */
108struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
109{
110 struct blkio_group *blkg;
111 struct hlist_node *n;
112 void *__key;
113
114 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
115 __key = blkg->key;
116 if (__key == key)
117 return blkg;
118 }
119
120 return NULL;
121}
Vivek Goyal9d6a9862009-12-04 10:36:41 -0500122EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
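
/*
 * Illustrative sketch, not part of this file: a policy finds its group
 * again with the same key it passed to blkiocg_add_blkio_group(), from
 * within an RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	blkg = blkiocg_lookup_group(blkcg, cfqd);  (cfqd: illustrative key)
 *	rcu_read_unlock();
 */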

#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				struct cftype *cftype)			\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION
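
/*
 * For reference, SHOW_FUNCTION(weight) above expands to:
 *
 *	static u64 blkiocg_weight_read(struct cgroup *cgroup,
 *					struct cftype *cftype)
 *	{
 *		struct blkio_cgroup *blkcg;
 *
 *		blkcg = cgroup_to_blkio_cgroup(cgroup);
 *		return (u64)blkcg->weight;
 *	}
 */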

static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
					blkcg->weight);
		spin_unlock(&blkio_list_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
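
/*
 * The read/write pair above backs the blkio.weight cgroup file. From
 * userspace (mount point and value illustrative):
 *
 *	# echo 500 > /cgroup/blkio/grp1/blkio.weight
 *	# cat /cgroup/blkio/grp1/blkio.weight
 *	500
 */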

#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				MINOR(blkg->dev), blkg->__VAR);		\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP
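
/*
 * Reading one of the per-group files generated above prints one
 * "major:minor value" line per device, per the seq_printf() format
 * string. Sample output (values illustrative):
 *
 *	# cat /cgroup/blkio/grp1/blkio.time
 *	8:0 2778
 *	8:16 4003
 */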

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
			blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event. Policies learn about it through the unlink callback they
	 * supplied when registering themselves on blkio_list.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support a hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means of supporting
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};
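
/*
 * Note: blkio_subsys_id is generated from the SUBSYS(blkio) entry in
 * include/linux/cgroup_subsys.h; the structure above is what plugs this
 * controller into the cgroup core.
 */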

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
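
/*
 * Illustrative sketch, not part of this file: an IO policy registers
 * itself (typically at module init) with a blkio_policy_type whose ops
 * provide the unlink and weight-update callbacks invoked above. With
 * hypothetical CFQ-style names:
 *
 *	static struct blkio_policy_type blkio_policy_cfq = {
 *		.ops = {
 *			.blkio_unlink_group_fn = cfq_unlink_blkio_group,
 *			.blkio_update_group_weight_fn =
 *						cfq_update_blkio_group_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_cfq);
 */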