/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at each
 * level, if the bios are dispatched into a single bio_list, there's a risk
 * that a local or child group which can queue many bios at once will fill
 * up the list and starve others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
};

static void throtl_pending_timer_fn(unsigned long arg);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return the throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

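/*
 * For illustration (mirroring the real call sites below): from a group's
 * context,
 *
 *	throtl_log(&tg->service_queue, "bios disp=%u", ret);
 *
 * emits a blktrace message like "throtl <blkg path> bios disp=4", while
 * the same call on the top-level td->service_queue drops the path prefix.
 */
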
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on it.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of the throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

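/*
 * A small worked example of the round-robin order above (hypothetical
 * qnodes A and B, not from the original source): if @queued holds
 * A:{a1, a2} followed by B:{b1}, successive throtl_pop_queued() calls
 * return a1 (A is rotated to the tail), then b1 (B empties and is
 * removed), then a2.  No single qnode can monopolize the dispatch order.
 */
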
/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
		    (unsigned long)sq);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. if a 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	/*
	 * We don't want new groups to escape the limits of their
	 * ancestors.  Update has_rules[] after a new group is brought
	 * online.
	 */
	tg_update_has_rules(pd_to_tg(pd));
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if the timer
 * was armed or there's no pending child left; returns %false if the
 * current dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce a
 * short delay before dispatch starts even if @sq->first_pending_disptime
 * is not in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

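/*
 * The intended calling pattern (see throtl_pending_timer_fn() below) is,
 * as a sketch with the queue lock held:
 *
 *	while (true) {
 *		throtl_select_dispatch(sq);
 *		if (throtl_schedule_next_dispatch(sq, false))
 *			break;	/- timer armed or nothing pending -/
 *		/- window still open: relax, then dispatch some more -/
 *	}
 */
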
static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
						      bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * The previous slice has expired.  We must have trimmed it after
	 * the last bio dispatch, which means we never used the bandwidth
	 * since the start of the last slice.  Try to make use of that
	 * bandwidth while giving credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					 unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if it is already used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched.  Also adjust slice_end.  It might
	 * happen that initially the cgroup limit was very low resulting in
	 * a high slice_end, but later the limit was bumped up and the bio
	 * was dispatched sooner; then we need to reduce slice_end.  A high
	 * bogus slice_end is bad because it does not allow a new slice to
	 * start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

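/*
 * A worked example with hypothetical numbers (assuming HZ == 1000, so
 * throtl_slice == 100): with bps = 1048576 (1MiB/s) and three complete
 * slices elapsed, bytes_trim = 1048576 * 100 * 3 / 1000 = 314572, i.e.
 * roughly 100KiB of budget is forgiven per elapsed slice, and
 * slice_start advances by 300 jiffies so the accounted history stays
 * bounded.
 */
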
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started.  Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as the minimum iops
	 * can be 1.  Then, at most, the elapsed jiffies should be
	 * equivalent to 1 second, as we will allow dispatch after 1 second
	 * and after that the slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return false;
}

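/*
 * A worked example with hypothetical numbers (assuming HZ == 1000 and
 * throtl_slice == 100): with iops = 100 and 100 jiffies elapsed in the
 * slice, io_allowed = 100 * 100 / 1000 = 10.  If 10 bios have already
 * been dispatched, the 11th gets
 * jiffy_wait = (11 * 1000) / 100 + 1 - 100 = 11, i.e. it may dispatch
 * ~11ms later once the budget catches up.
 */
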
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started.  Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time doesn't account for the rounding up of
	 * jiffy_elapsed_rnd above.  Add that time in as well.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}

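/*
 * A worked example with hypothetical numbers (assuming HZ == 1000 and
 * throtl_slice == 100): with bps = 1048576 (1MiB/s) and 100 jiffies
 * elapsed, bytes_allowed = 1048576 * 100 / 1000 = 104857.  A 256KiB bio
 * (262144 bytes) with nothing yet dispatched overshoots by 157287 bytes,
 * so jiffy_wait = 157287 * 1000 / 1048576 = 150 jiffies, i.e. the bio
 * becomes dispatchable ~150ms later.
 */
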
/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list.  So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice long since now.  A new slice is started only for an
	 * empty throttle group.  If there is a queued bio, there should be
	 * an active slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
	tg->io_disp[rw]++;

	/*
	 * REQ_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued.  Set it
	 * when a bio is being charged to a tg.
	 */
	if (!(bio->bi_opf & REQ_THROTTLED))
		bio->bi_opf |= REQ_THROTTLED;
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = throtl_peek_queued(&sq->queued[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
						   child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely.  Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg().  If our parent is
	 * @td->service_queue, @bio is ready to be issued.  Put it on its
	 * bio_lists[] and decrease the total number queued.  The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @arg: the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched.  This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly.  If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bios reach the bio_lists[]
 * of throtl_data->service_queue.  Those bios are ready and issued by this
 * function.
 */
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      dispatch_work);
	struct throtl_service_queue *td_sq = &td->service_queue;
	struct request_queue *q = td->queue;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;
	int rw;

	bio_list_init(&bio_list_on_stack);

	spin_lock_irq(q->queue_lock);
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
			bio_list_add(&bio_list_on_stack, bio);
	spin_unlock_irq(q->queue_lock);

	if (!bio_list_empty(&bio_list_on_stack)) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
}

static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
			      int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	u64 v = *(u64 *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	unsigned int v = *(unsigned int *)((void *)tg + off);

	if (v == -1)
		return 0;
	return __blkg_prfill_u64(sf, pd, v);
}

static int tg_print_conf_u64(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static int tg_print_conf_uint(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static void tg_conf_updated(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;

	throtl_log(&tg->service_queue,
		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
		   tg->bps[READ], tg->bps[WRITE],
		   tg->iops[READ], tg->iops[WRITE]);

	/*
	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
	 * considered to have rules if either the tg itself or any of its
	 * ancestors has rules.  This identifies groups without any
	 * restrictions in the whole hierarchy and allows them to bypass
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
		tg_update_has_rules(blkg_to_tg(blkg));

	/*
	 * We're already holding queue_lock and know @tg is valid.  Let's
	 * apply the new config directly.
	 *
	 * Restart the slices for both READ and WRITES.  It might happen
	 * that a group's limits are lowered suddenly and we don't want to
	 * account recently dispatched IO against the new low rate.
	 */
	throtl_start_new_slice(tg, 0);
	throtl_start_new_slice(tg, 1);

	if (tg->flags & THROTL_TG_PENDING) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(sq->parent_sq, true);
	}
}

static ssize_t tg_set_conf(struct kernfs_open_file *of,
			   char *buf, size_t nbytes, loff_t off, bool is_u64)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	int ret;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;
	if (!v)
		v = -1;

	tg = blkg_to_tg(ctx.blkg);

	if (is_u64)
		*(u64 *)((void *)tg + of_cft(of)->private) = v;
	else
		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;

	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, true);
}

static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	return tg_set_conf(of, buf, nbytes, off, false);
}

static struct cftype throtl_legacy_files[] = {
	{
		.name = "throttle.read_bps_device",
		.private = offsetof(struct throtl_grp, bps[READ]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.write_bps_device",
		.private = offsetof(struct throtl_grp, bps[WRITE]),
		.seq_show = tg_print_conf_u64,
		.write = tg_set_conf_u64,
	},
	{
		.name = "throttle.read_iops_device",
		.private = offsetof(struct throtl_grp, iops[READ]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.write_iops_device",
		.private = offsetof(struct throtl_grp, iops[WRITE]),
		.seq_show = tg_print_conf_uint,
		.write = tg_set_conf_uint,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "throttle.io_serviced",
		.private = (unsigned long)&blkcg_policy_throtl,
		.seq_show = blkg_print_stat_ios,
	},
	{ } /* terminate */
};

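/*
 * Example (cgroup v1): to cap reads on a device to 1MB/s and later clear
 * the cap again, userspace would do something like the following; the
 * "8:16" device number is purely illustrative:
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	echo "8:16 0" > blkio.throttle.read_bps_device
 */
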
static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
			 int off)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	const char *dname = blkg_dev_name(pd->blkg);
	char bufs[4][21] = { "max", "max", "max", "max" };

	if (!dname)
		return 0;
	if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 &&
	    tg->iops[READ] == -1 && tg->iops[WRITE] == -1)
		return 0;

	if (tg->bps[READ] != -1)
		snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]);
	if (tg->bps[WRITE] != -1)
		snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]);
	if (tg->iops[READ] != -1)
		snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]);
	if (tg->iops[WRITE] != -1)
		snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]);

	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
		   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
	return 0;
}
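
/*
 * A group with a 2MB/s read cap and a 120 wiops cap on device 8:16 would
 * show up in io.max roughly as follows (values illustrative):
 *
 *	8:16 rbps=2097152 wbps=max riops=max wiops=120
 */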

static int tg_print_max(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_max,
			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
	return 0;
}

static ssize_t tg_set_max(struct kernfs_open_file *of,
			  char *buf, size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct throtl_grp *tg;
	u64 v[4];
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
	if (ret)
		return ret;

	tg = blkg_to_tg(ctx.blkg);

	v[0] = tg->bps[READ];
	v[1] = tg->bps[WRITE];
	v[2] = tg->iops[READ];
	v[3] = tg->iops[WRITE];

	while (true) {
		char tok[27];	/* wiops=18446744073709551616 */
		char *p;
		u64 val = -1;	/* "max" leaves this at -1 (unlimited) */
		int len;

		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
			break;
		if (tok[0] == '\0')
			break;
		ctx.body += len;

		ret = -EINVAL;
		p = tok;
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
			goto out_finish;

		ret = -ERANGE;
		if (!val)
			goto out_finish;

		ret = -EINVAL;
		if (!strcmp(tok, "rbps"))
			v[0] = val;
		else if (!strcmp(tok, "wbps"))
			v[1] = val;
		else if (!strcmp(tok, "riops"))
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops"))
			v[3] = min_t(u64, val, UINT_MAX);
		else
			goto out_finish;
	}

	tg->bps[READ] = v[0];
	tg->bps[WRITE] = v[1];
	tg->iops[READ] = v[2];
	tg->iops[WRITE] = v[3];

	tg_conf_updated(tg);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
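
/*
 * Example (cgroup v2): limits not mentioned in a write are left alone
 * because v[] above is seeded from the current configuration.  Something
 * like the following ("8:16" illustrative) sets a write-bps limit and then
 * removes it while keeping any other limits on the device intact:
 *
 *	echo "8:16 wbps=2097152" > io.max
 *	echo "8:16 wbps=max" > io.max
 */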

static struct cftype throtl_files[] = {
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = tg_print_max,
		.write = tg_set_max,
	},
	{ } /* terminate */
};

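/* cancel any pending dispatch work so it can't run after teardown */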
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_work_sync(&td->dispatch_work);
}

static struct blkcg_policy blkcg_policy_throtl = {
	.dfl_cftypes		= throtl_files,
	.legacy_cftypes		= throtl_legacy_files,

	.pd_alloc_fn		= throtl_pd_alloc,
	.pd_init_fn		= throtl_pd_init,
	.pd_online_fn		= throtl_pd_online,
	.pd_free_fn		= throtl_pd_free,
};

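/*
 * blk_throtl_bio - check whether @bio needs to be throttled
 *
 * Expects to be called with the rcu read lock held; @blkg may be NULL,
 * in which case the root blkg of @q is used.  Returns %true if @bio was
 * queued for later dispatch and must not be issued by the caller, %false
 * if it is within limits (or throttling doesn't apply) and can be
 * dispatched directly.
 */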
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	struct throtl_qnode *qn = NULL;
	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
	struct throtl_service_queue *sq;
	bool rw = bio_data_dir(bio);
	bool throttled = false;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
	if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_bypass(q)))
		goto out_unlock;

	sq = &tg->service_queue;

	while (true) {
		/* throtl is FIFO - if bios are already queued, should queue */
		if (sq->nr_queued[rw])
			break;

		/* if above limits, break to queue */
		if (!tg_may_dispatch(tg, bio, NULL))
			break;

		/* within limits, let's charge and dispatch directly */
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending while trim
		 * is never called.  If limits are then reduced suddenly, we
		 * would account all the IO dispatched so far at the new low
		 * rate and newly queued IO would get a really long dispatch
		 * time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(tg, rw);

		/*
		 * @bio passed through this layer without being throttled.
		 * Climb up the ladder.  If we're already at the top, it
		 * can be executed directly.
		 */
		qn = &tg->qnode_on_parent[rw];
		sq = sq->parent_sq;
		tg = sq_to_tg(sq);
		if (!tg)
			goto out_unlock;
	}

	/* out-of-limit, queue to @tg */
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
		   rw == READ ? 'R' : 'W',
		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
		   tg->io_disp[rw], tg->iops[rw],
		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

	bio_associate_current(bio);
	tg->td->nr_queued[rw]++;
	throtl_add_bio_tg(bio, qn, tg);
	throttled = true;

	/*
	 * Update @tg's dispatch time and force schedule dispatch if @tg
	 * was empty before @bio.  The forced scheduling isn't likely to
	 * cause undue delay as @bio is likely to be dispatched directly if
	 * its @tg's disptime is not in the future.
	 */
	if (tg->flags & THROTL_TG_WAS_EMPTY) {
		tg_update_disptime(tg);
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out:
	/*
	 * As multiple blk-throtls may stack in the same issue path, we
	 * don't want bios to leave with the flag set.  Clear the flag if
	 * being issued.
	 */
	if (!throttled)
		bio->bi_opf &= ~REQ_THROTTLED;
	return throttled;
}

/*
 * Dispatch all bios from all children tg's queued on @parent_sq.  On
 * return, @parent_sq is guaranteed to not have any active children tg's
 * and all bios from previously active tg's are on @parent_sq->queued[].
 */
static void tg_drain_bios(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	while ((tg = throtl_rb_first(parent_sq))) {
		struct throtl_service_queue *sq = &tg->service_queue;
		struct bio *bio;

		throtl_dequeue_tg(tg);

		while ((bio = throtl_peek_queued(&sq->queued[READ])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
			tg_dispatch_one_bio(tg, bio_data_dir(bio));
	}
}

/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	return ret;
}

void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);