/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle-related work */
static struct workqueue_struct *kthrotld_workqueue;

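/*
 * Service tree of active throtl_grps, sorted by disptime. ->left caches
 * the leftmost (earliest deadline) node so the next group to dispatch
 * can be picked without rewalking the tree.
 */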
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

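/*
 * Conversion helpers between the generic blkcg layer objects
 * (blkg_policy_data, blkcg_gq) and throtl_grp, which embeds its
 * blkg_policy_data as the first member.
 */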
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

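/*
 * THROTL_TG_FNS(on_rr) below expands to throtl_mark_tg_on_rr(),
 * throtl_clear_tg_on_rr() and throtl_tg_on_rr(), which set, clear and
 * test THROTL_TG_FLAG_on_rr respectively.
 */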
THROTL_TG_FNS(on_rr);

#define throtl_log_tg(td, tg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

/*
 * Worker for allocating per-cpu stats for tgs. This is scheduled on the
 * system_wq once there are some groups on the tg_stats_alloc_list
 * waiting for allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path. Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case.
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case.
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;

	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

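/*
 * (Re)arm dispatch_work from the service tree: fire immediately if the
 * earliest disptime is already due, otherwise after min_disptime -
 * jiffies.
 */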
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/* any pending children left? */
	if (!st->count)
		return;

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine whether the previously allocated or extended slice is complete */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched, so also adjust slice_end. It might
	 * happen that initially the cgroup limit was very low, resulting
	 * in a high slice_end, but later the limit was bumped up and the
	 * bio was dispatched sooner; then we need to reduce slice_end.
	 * A bogusly high slice_end is bad because it does not allow a new
	 * slice to start.
	 */

	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
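	/*
	 * Worked example (illustrative, assuming HZ == 1000 and the
	 * default 100-jiffy throtl_slice): with bps == 1048576 (1MB/s)
	 * and nr_slices == 2, tmp = 1048576 * 100 * 2 = 209715200 and
	 * bytes_trim = 209715200 / 1000 = 209715, i.e. roughly 100KB of
	 * consumed quota is retired per fully elapsed slice.
	 */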

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops
	 * can be 1, so at most jiffy_elapsed_rnd should be equivalent to
	 * 1 second, since we allow dispatch after 1 second and by then
	 * the slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
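	/*
	 * Illustrative numbers, assuming HZ == 1000: with iops == 100 and
	 * io_disp == 49, the 50th bio is due 50 * 1000 / 100 + 1 = 501
	 * jiffies after slice start; the elapsed time is subtracted below
	 * to get the remaining wait.
	 */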

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

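	/*
	 * Illustrative numbers, assuming HZ == 1000: at bps == 1048576
	 * (1MB/s), an excess of extra_bytes == 524288 (512KB) gives
	 * jiffy_wait = 524288 * 1000 / 1048576 = 500 jiffies, i.e. half a
	 * second before the bio fits within the allowed rate.
	 */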
	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time does not take into account the rounding up we
	 * did above. Add that time as well.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps and tg->iops are -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disable interrupts to provide mutual exclusion between two
	 * writes on the same cpu. This probably is not needed for 64bit;
	 * not optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */
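	/*
	 * With the default throtl_grp_quantum of 8 this works out to at
	 * most 6 reads and 2 writes per group per dispatch round.
	 */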

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
	       && tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1])
			tg_update_disptime(td, tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(to_delayed_work(work),
					      struct throtl_data, dispatch_work);
	struct request_queue *q = td->queue;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
		   td->nr_queued[READ] + td->nr_queued[WRITE],
		   td->nr_queued[READ], td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

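/*
 * Sum the per-cpu blkg_rwstat found at offset @off in each CPU's
 * tg_stats_cpu into a single rwstat and print it.
 */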
static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
			  cft->private, true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_data *td;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	td = ctx.blkg->q->td;

	if (!ctx.v)
		ctx.v = -1;

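	/*
	 * cft->private holds the offsetof() of the limit being written
	 * (see throtl_files[] below), so this one helper can update any
	 * of the four bps/iops fields in place.
	 */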
	if (is_u64)
		*(u64 *)((void *)tg + cft->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		      tg->bps[READ], tg->bps[WRITE],
		      tg->iops[READ], tg->iops[WRITE]);

	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITE. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(td, tg, 0);
	throtl_start_new_slice(td, tg, 1);

	if (throtl_tg_on_rr(tg)) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

	blkg_conf_finish(&ctx);
	return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}

static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};

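/*
 * Returns true if the bio was throttled and queued for later dispatch
 * (the caller must not issue it), false if the caller may dispatch it
 * immediately.
 */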
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkcg *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in a lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same
		 * direction. No need to update the dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not
		 * queued for a long time and the slice keeps on extending
		 * while trim is never called. If the limits are then
		 * reduced suddenly, we would take into account all the IO
		 * dispatched so far at the new low rate and newly queued
		 * IO would get a really long dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not
		 * queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	queue_lockdep_assert_held(q);

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	td->tg_service_tree = THROTL_RB_ROOT;
	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);