/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

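/*
 * Generate helpers to mark, clear and test a per-group state flag
 * (currently only THROTL_TG_FLAG_on_rr).
 */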
#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline int total_nr_queued(struct throtl_data *td)
{
	return (td->nr_queued[0] + td->nr_queued[1]);
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}

static void throtl_init_group(struct throtl_grp *tg)
{
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Take the initial reference that will be released on destroy
	 * This can be thought of a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);
}

/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
			struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
	 * tree of blkg (instead of traversing through hash list all
	 * the time).
	 */

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = &td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
		goto done;
	}

	if (tg)
		goto done;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		goto done;

	throtl_init_group(tg);

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	throtl_add_group_to_td_list(td, tg);
done:
	return tg;
}

static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL;
	struct blkio_cgroup *blkcg;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_alloc_tg(td, blkcg);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

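/*
 * Insert a group into the service tree, keyed by its dispatch time, and
 * cache the leftmost (earliest to dispatch) node.
 */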
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

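/*
 * Recompute when this group may dispatch next, based on the head bio of
 * each direction, and reposition the group in the service tree.
 */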
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

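/*
 * Walk the service tree and dispatch bios from groups whose dispatch time
 * has passed, up to throtl_quantum bios per round.
 */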
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO at the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) > 0 || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we are
 * under rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if queue was going away, cgroup deletion
 * path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}

/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock, that means, blkg is
 * valid and in turn key is valid. queue exit path can not race because
 * of blkcg_lock
 *
 * Can not take queue lock in update functions as queue lock under blkcg_lock
 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

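/*
 * Entry point from the block layer submission path. Always returns 0; if the
 * bio got throttled and queued, *biop is set to NULL so the caller does not
 * issue it now.
 */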
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the
		 * new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* Init root group */
	tg = &td->root_tg;
	throtl_init_group(tg);

	/*
	 * Set root group reference to 2. One reference will be dropped when
	 * all groups on tg_list are being deleted during queue exit. Other
	 * reference will remain there as we don't want to delete this group
	 * as it is statically allocated and gets destroyed when throtl_data
	 * goes away.
	 */
	atomic_inc(&tg->ref);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();
	throtl_add_group_to_td_list(td, tg);

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe to make sure after previous flush if somebody did
	 * update limits through cgroup and another work got queued, cancel
	 * it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);