/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;

	struct rcu_head rcu_head;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

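/*
 * Helpers to set, clear and test per-group state flags. Currently the only
 * flag is THROTL_TG_FLAG_on_rr, which tracks whether the group is on the
 * service tree.
 */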
#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args); 	\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}

static void throtl_init_group(struct throtl_grp *tg)
{
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);
}

/* Should be called with rcu read lock held (needed for blkcg) */
static void
throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
{
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time as queue was being instantiated
	 * and driver had not attached a device yet
	 */
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}

/*
 * Should be called without queue lock held. Here queue lock will be
 * taken rarely. It will be taken only once during the lifetime of a group
 * if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}

static void throtl_init_add_tg_lists(struct throtl_data *td,
			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
{
	__throtl_tg_fill_dev_details(td, tg);

	/* Add group onto cgroup list */
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				tg->blkg.dev, BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	throtl_add_group_to_td_list(td, tg);
}

/* Should be called without queue lock and outside of rcu period */
static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL;
	int ret;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		return NULL;

	ret = blkio_alloc_blkg_stats(&tg->blkg);

	if (ret) {
		kfree(tg);
		return NULL;
	}

	throtl_init_group(tg);
	return tg;
}

static struct
throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;
	void *key = td;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = td->root_tg;
	else
		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

/*
 * This function returns with queue lock unlocked in case of error, e.g. if
 * the request queue is no more.
 */
static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
	struct throtl_grp *tg = NULL, *__tg = NULL;
	struct blkio_cgroup *blkcg;
	struct request_queue *q = td->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		rcu_read_unlock();
		return tg;
	}

	/*
	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
	 * we need to drop rcu lock and queue_lock before we call alloc.
	 *
	 * Take the request queue reference to make sure queue does not
	 * go away once we return from allocation.
	 */
	blk_get_queue(q);
	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);
	/*
	 * We might have slept in group allocation. Make sure queue is not
	 * dead.
	 */
	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		blk_put_queue(q);
		if (tg)
			kfree(tg);

		return ERR_PTR(-ENODEV);
	}
	blk_put_queue(q);

	/* Group allocated and queue is still alive. take the lock */
	spin_lock_irq(q->queue_lock);

	/*
	 * Initialize the new group. After sleeping, read the blkcg again.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);

	/*
	 * If some other thread already allocated the group while we were
	 * not holding queue lock, free up the group
	 */
	__tg = throtl_find_tg(td, blkcg);

	if (__tg) {
		kfree(tg);
		rcu_read_unlock();
		return __tg;
	}

	/* Group allocation failed. Account the IO to root group */
	if (!tg) {
		tg = td->root_tg;
		return tg;
	}

	throtl_init_add_tg_lists(td, tg, blkcg);
	rcu_read_unlock();
	return tg;
}

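/*
 * Return the group with the earliest dispatch time, using the cached
 * leftmost node of the service tree when available.
 */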
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

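/* Insert a group into the service tree, keyed by its dispatch time. */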
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

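/*
 * (Re)arm the dispatch work to run when the group with the earliest
 * dispatch time becomes eligible.
 */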
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

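/*
 * Check whether dispatching one more bio stays within the group's iops
 * limit for the current slice. Returns true if the bio can be dispatched
 * now; otherwise *wait is set to the approximate wait time in jiffies.
 */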
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: minimum iops can be
	 * 1, so at most jiffy_elapsed should be equivalent to 1 second, as
	 * we will allow dispatch after 1 second and after that the slice
	 * should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

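/*
 * Same as above, but for the bytes-per-second limit: check whether the bio
 * fits in the byte budget of the current slice, else report the approximate
 * wait time in jiffies.
 */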
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

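/* Account a dispatched bio against the group's byte and io counters. */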
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

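/*
 * Recompute the group's dispatch time from the bios at the head of its
 * READ and WRITE queues and reposition it on the service tree.
 */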
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

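/*
 * Walk the service tree and dispatch bios from groups whose dispatch time
 * has passed, up to throtl_quantum bios per round.
 */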
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

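/*
 * Apply limit updates made through the cgroup interface: restart the slices
 * of the affected groups and recompute their dispatch times.
 */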
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with new low rate
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{

	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we are
 * under rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if queue was going away, cgroup deletion
 * path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}

/*
 * For all update functions, key should be a valid pointer because these
 * update functions are called under blkcg_lock, that means, blkg is
 * valid and in turn key is valid. queue exit path can not race because
 * of blkcg_lock
 *
 * Can not take queue lock in update functions as queue lock under blkcg_lock
 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(void *key,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_bps(void *key,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_read_iops(void *key,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_update_blkio_group_write_iops(void *key,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_data *td = key;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

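/*
 * Decide whether @bio can be dispatched right away or has to be queued in
 * its group. When the bio is throttled, *biop is set to NULL and the bio is
 * dispatched later by the worker.
 */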
int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
					rw, rw_is_sync(bio->bi_rw));
			rcu_read_unlock();
			return 0;
		}
	}
	rcu_read_unlock();

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group
	 */

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (IS_ERR(tg)) {
		if (PTR_ERR(tg) == -ENODEV) {
			/*
			 * Queue is gone. No queue lock held here.
			 */
			return -ENODEV;
		}
	}

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;

	}

	/* Bio is within rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

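/* Set up per-queue throttling state: the throtl_data and the root group. */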
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	/* alloc and init root group. */
	td->queue = q;
	tg = throtl_alloc_tg(td);

	if (!tg) {
		kfree(td);
		return -ENOMEM;
	}

	td->root_tg = tg;

	rcu_read_lock();
	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe to make sure that, after the previous flush, if
	 * somebody updated limits through cgroup and another work got queued,
	 * cancel it.
	 */
	throtl_shutdown_wq(q);
	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);