/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include "blk-cgroup.h"

extern void cfq_unlink_blkio_group(void *, struct blkio_group *);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };

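/* Map a generic cgroup to its blkio-specific state (the embedded css). */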
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

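/* Accumulate the service time used and sectors dispatched by @blkg. */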
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
			unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}

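/*
 * Link @blkg into @blkcg's group list under the cgroup's lock. The opaque
 * @key identifies the owning policy/queue data and is what
 * blkiocg_lookup_group() matches on; publication is RCU-safe so lookups
 * can run locklessly.
 */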
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on the cgroup list. Otherwise returns 1
 * indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}

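/*
 * Generate a read_u64 handler returning a per-cgroup scalar field
 * (currently only "weight").
 */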
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

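/*
 * Update the cgroup's weight after range-checking it against
 * BLKIO_WEIGHT_MIN/BLKIO_WEIGHT_MAX.
 */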
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	blkcg->weight = (unsigned int)val;
	return 0;
}

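/*
 * Generate a seq_file read handler that walks the cgroup's blkio_groups
 * under RCU and prints one "major:minor <value>" line per device.
 */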
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
			struct cftype *cftype, struct seq_file *m)	\
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				MINOR(blkg->dev), blkg->__VAR);		\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

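/* Debug-only statistic: count how many times a group has been dequeued. */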
#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
#endif

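/*
 * Control files exposed to userspace; cgroup core prefixes each name with
 * the subsystem name, e.g. "blkio.weight".
 */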
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

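/*
 * Cgroup teardown: unlink every blkio_group from the cgroup's list, dropping
 * the spinlock before notifying the owning policy so it can release the
 * group, then free the css id and the blkio_cgroup itself.
 */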
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
				blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event.
	 *
	 * Currently this is a static call to one IO controlling policy. Once
	 * we have more policies in place, we need some dynamic registration
	 * of callback function.
	 */
	cfq_unlink_blkio_group(key, blkg);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	kfree(blkcg);
}

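/*
 * Allocate per-cgroup state. The root cgroup uses the statically defined
 * blkio_root_cgroup; children get a fresh blkio_cgroup with the default
 * weight.
 */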
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

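/*
 * Record in the task's io_context that its cgroup has changed, so the IO
 * policy can react to the move on the task's subsequent IO.
 */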
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
	.subsys_id = blkio_subsys_id,
	.use_id = 1,
};