/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

struct throtl_service_queue {
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime; /* disptime of the first tg */
};

#define THROTL_SERVICE_QUEUE_INITIALIZER				\
	(struct throtl_service_queue){ .pending_tree = RB_ROOT }

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

#define throtl_log_tg(tg, fmt, args...)	do {				\
	char __pbuf[128];						\
									\
	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
} while (0)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	RB_CLEAR_NODE(&tg->rb_node);
	tg->td = blkg->q->td;
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path. Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}
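
/*
 * Insert @tg into @parent_sq->pending_tree, keyed by tg->disptime.  The
 * leftmost node (earliest dispatch time) is cached in ->first_pending so
 * that throtl_rb_first() can avoid rewalking the tree.
 */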
static void tg_service_queue_add(struct throtl_grp *tg,
				 struct throtl_service_queue *parent_sq)
{
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg,
				struct throtl_service_queue *parent_sq)
{
	tg_service_queue_add(tg, parent_sq);
	tg->flags |= THROTL_TG_PENDING;
	parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg, parent_sq);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg,
				struct throtl_service_queue *parent_sq)
{
	throtl_rb_erase(&tg->rb_node, parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg, parent_sq);
}

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;

	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_service_queue *sq = &td->service_queue;

	/* any pending children left? */
	if (!sq->nr_pending)
		return;

	update_min_dispatch_time(sq);

	if (time_before_eq(sq->first_pending_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
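/*
 * For example (illustrative numbers, not from the original source): with
 * HZ=1000 and throtl_slice=100 jiffies, a group limited to 1048576 bytes
 * per second is forgiven bps * throtl_slice * nr_slices / HZ = 104857
 * bytes per fully elapsed slice, mirroring the bytes_trim math below.
 */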
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
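
/*
 * Can one more IO be dispatched in the current slice?  The allowance is
 * the group's iops limit scaled by the elapsed (rounded up) slice time:
 * io_allowed = iops[rw] * jiffy_elapsed_rnd / HZ.  Returns 1 if the bio
 * fits, else 0 with *wait set to the estimated jiffies until it would.
 */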
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: since the minimum
	 * iops can be 1, at most the elapsed jiffies should be equivalent
	 * to 1 second, as we will allow dispatch after 1 second and after
	 * that the slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}

static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
			      struct throtl_service_queue *parent_sq)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	tg->nr_queued[rw]++;
	tg->td->nr_queued[rw]++;
	throtl_enqueue_tg(tg, parent_sq);
}
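
/*
 * Recompute tg->disptime from the bios at the head of the READ and WRITE
 * queues and re-sort @tg in @parent_sq: disptime is jiffies plus the
 * smaller of the two wait times reported by tg_may_dispatch().
 */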
static void tg_update_disptime(struct throtl_grp *tg,
			       struct throtl_service_queue *parent_sq)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg, parent_sq);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg, parent_sq);
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
				struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(tg->td->nr_queued[rw] <= 0);
	tg->td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(tg, rw);
}

static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
				  struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;

	while (1) {
		tg = throtl_rb_first(parent_sq);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg, parent_sq);

		nr_disp += throtl_dispatch_tg(tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1])
			tg_update_disptime(tg, parent_sq);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/* work function to dispatch throttled bios */
void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(to_delayed_work(work),
					      struct throtl_data, dispatch_work);
	struct request_queue *q = td->queue;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
		   td->nr_queued[READ] + td->nr_queued[WRITE],
		   td->nr_queued[READ], td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(&td->service_queue, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);

	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to ensure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkg_rwstat rwstat = { }, tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		tmp = blkg_rwstat_read((void *)sc + off);
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			rwstat.cnt[i] += tmp.cnt[i];
	}

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
			       struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
			  cft->private, true);
	return 0;
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, cft->private, false);
	return 0;
}

static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
		       bool is_u64)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	struct throtl_data *td;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);
	td = ctx.blkg->q->td;

	if (!ctx.v)
		ctx.v = -1;

	if (is_u64)
		*(u64 *)((void *)tg + cft->private) = ctx.v;
	else
		*(unsigned int *)((void *)tg + cft->private) = ctx.v;

	throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		      tg->bps[READ], tg->bps[WRITE],
		      tg->iops[READ], tg->iops[WRITE]);

	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES. It might happen
	 * that a group's limits are dropped suddenly and we don't want to
	 * account recently dispatched IO with the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg, &td->service_queue);
		throtl_schedule_next_dispatch(td);
	}

	blkg_conf_finish(&ctx);
	return 0;
}

static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
			   const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, true);
}

static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	return tg_set_conf(cgrp, cft, buf, false);
}

static struct cftype throtl_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.read_seq_string = tg_print_conf_u64,
		.write_string = tg_set_conf_u64,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.read_seq_string = tg_print_conf_uint,
		.write_string = tg_set_conf_uint,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = offsetof(struct tg_stats_cpu, service_bytes),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{
		.name = "throttle.io_serviced",
		.private = offsetof(struct tg_stats_cpu, serviced),
		.read_seq_string = tg_print_cpu_rwstat,
	},
	{ }	/* terminate */
};
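
/*
 * The files above are created under the blkio cgroup hierarchy.  Example
 * usage from userspace (the mount path and device numbers are
 * illustrative):
 *
 *   echo "8:16 1048576" > /cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *
 * limits reads issued by cgroup grp1 against device 8:16 to 1 MiB/s.
 */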

static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.pd_size		= sizeof(struct throtl_grp),
	.cftypes		= throtl_files,

	.pd_init_fn		= throtl_pd_init,
	.pd_exit_fn		= throtl_pd_exit,
	.pd_reset_stats_fn	= throtl_pd_reset_stats,
};
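
/*
 * blk_throtl_bio - check if @bio needs to be throttled
 *
 * Returns %true if @bio was queued for delayed dispatch, %false if the
 * caller may submit it right away.
 */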
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkcg *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = bio_blkcg(bio);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		if (tg_no_rule_group(tg, rw)) {
			throtl_update_dispatch_stats(tg_to_blkg(tg),
						     bio->bi_size, bio->bi_rw);
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in the same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within the rate limit of the group */
	if (tg_may_dispatch(tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if a bio is not queued.
		 */
		throtl_trim_slice(tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	bio_associate_current(bio);
	throtl_add_bio_tg(bio, tg, &q->td->service_queue);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(tg, &td->service_queue);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_service_queue *parent_sq = &td->service_queue;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	queue_lockdep_assert_held(q);

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(parent_sq))) {
		throtl_dequeue_tg(tg, parent_sq);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);