/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and is ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	int limits_changed;

	struct rcu_head rcu_head;
};

struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	int limits_changed;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
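
/*
 * THROTL_TG_FNS(on_rr) above generates three helpers:
 * throtl_mark_tg_on_rr(), throtl_clear_tg_on_rr() and throtl_tg_on_rr(),
 * which set, clear and test the THROTL_TG_FLAG_on_rr bit in tg->flags.
 */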

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);	\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in an RCU manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}

static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
						    struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, q->node);
	if (!tg)
		return NULL;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	return &tg->blkg;
}

static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time as the queue was being instantiated
	 * and the driver had not attached a device yet.
	 */
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}

/*
 * Should be called without queue lock held. Here the queue lock will be
 * taken rarely. It will be taken only once during the lifetime of a group
 * if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}

static void throtl_link_blkio_group(struct request_queue *q,
				    struct blkio_group *blkg)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	__throtl_tg_fill_dev_details(td, tg);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}

static struct
throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup)
		tg = td->root_tg;
	else
		tg = tg_of_blkg(blkg_lookup(blkcg, td->queue,
					    BLKIO_POLICY_THROTL));

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkio_cgroup *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case.
	 */
	if (blkcg == &blkio_root_cgroup) {
		tg = td->root_tg;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_THROTL, false);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = tg_of_blkg(blkg);
		else if (!blk_queue_dead(q))
			tg = td->root_tg;
	}

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}

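/*
 * Service tree helpers. The tree is keyed by tg->disptime; st->left caches
 * the leftmost (earliest dispatch time) node so that throtl_rb_first() is
 * cheap in the common case, and st->min_disptime mirrors that group's
 * dispatch time for the scheduling decisions below.
 */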
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	BUG_ON(!st->count);

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
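
/*
 * Example of the throtl_trim_slice() arithmetic above (illustrative numbers,
 * not taken from the source): with HZ=1000, throtl_slice=100 jiffies and
 * bps[rw]=1048576 (1 MiB/s), one fully elapsed slice trims
 * bytes_trim = 1048576 * 100 * 1 / 1000 = ~104857 bytes (and iops[rw]=100
 * trims io_trim = 100 * 100 * 1 / 1000 = 10 ios) from the dispatched
 * counters, while slice_start advances by one throtl_slice.
 */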

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops
	 * can be 1. Then at most the elapsed jiffies should be equivalent to
	 * 1 second, as we will allow dispatch after 1 second and after that
	 * the slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
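
/*
 * Example for tg_with_in_iops_limit() above (illustrative numbers): with
 * HZ=1000, iops[rw]=100 and jiffy_elapsed_rnd rounded up to 100 jiffies,
 * io_allowed = 100 * 100 / 1000 = 10. The 11th bio in that window is held
 * back; jiffy_wait = (11 * 1000)/100 + 1 minus the elapsed jiffies, i.e.
 * roughly the remainder of the ~110 ms it takes to earn 11 ios at 100 iops.
 */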

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}
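
/*
 * For instance (illustrative): if the bps rule would allow this bio after
 * 4 jiffies and the iops rule after 10, tg_may_dispatch() reports a wait of
 * 10 jiffies and extends the slice so that it covers that wait.
 */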

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
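
/*
 * With the defaults above (throtl_grp_quantum = 8), max_nr_reads is
 * 8 * 3 / 4 = 6 and max_nr_writes is 2, so a single call to
 * throtl_dispatch_tg() moves at most 6 reads and 2 writes from a group.
 */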

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO against the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * they are dispatched immediately.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{

	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something is wrong if we are trying to remove the same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, the group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;
	bool empty = true;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/* skip root? */
		if (!release_root && tg == td->root_tg)
			continue;

		/*
		 * If the cgroup removal path got to the blk_group first and
		 * removed it from the cgroup list, then it will take care of
		 * destroying the group too.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
		else
			empty = false;
	}
	return empty;
}

/*
 * Blk cgroup controller notification saying that the blkio_group object is
 * being delinked as the associated cgroup object is going away. That also
 * means that no new IO will come in this group. So get rid of this group as
 * soon as any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). @q is the rcu protected
 * pointer. That means @q is a valid request_queue pointer as long as we
 * are under the rcu read lock.
 *
 * @q was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if the queue was going away, the cgroup
 * deletion path got to it first.
 */
void throtl_unlink_blkio_group(struct request_queue *q,
			       struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	throtl_destroy_tg(q->td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static bool throtl_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * Clear tgs but leave the root one alone. This is necessary
	 * because root_tg is expected to be persistent and safe because
	 * blk-throtl can never be disabled while @q is alive. This is a
	 * kludge to prepare for unified blkg. This whole function will be
	 * removed soon.
	 */
	return throtl_release_tgs(q->td, false);
}

static void throtl_update_blkio_group_common(struct throtl_data *td,
					     struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
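
/*
 * Note on the xchg() pattern above: setting tg->limits_changed and
 * td->limits_changed with xchg() pairs with the xchg(..., false) calls in
 * throtl_process_limit_change(), so the worker atomically tests and clears
 * the flags, without extra locking, before applying the new limits.
 */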

/*
 * For all update functions, @q should be a valid pointer because these
 * update functions are called under blkcg_lock; that means blkg is valid
 * and in turn @q is valid. The queue exit path can not race because of
 * blkcg_lock.
 *
 * Can not take queue lock in update functions as queue lock under blkcg_lock
 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(struct request_queue *q,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_write_bps(struct request_queue *q,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_read_iops(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_write_iops(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_alloc_group_fn = throtl_alloc_blkio_group,
		.blkio_link_group_fn = throtl_link_blkio_group,
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_clear_queue_fn = throtl_clear_queue,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
					rw, rw_is_sync(bio->bi_rw));
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;

	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time, the slice keeps on extending and trim is
		 * not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct blkio_group *blkg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	q->td = td;
	td->queue = q;

	/* alloc and init root group. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_THROTL,
				  true);
	if (!IS_ERR(blkg))
		td->root_tg = tg_of_blkg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (!td->root_tg) {
		kfree(td);
		return -ENOMEM;
	}
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td, true);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->q accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe: if, after the previous flush, somebody updated
	 * limits through cgroup and another work got queued, cancel it.
	 */
	throtl_shutdown_wq(q);
}

void blk_throtl_release(struct request_queue *q)
{
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);