// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latencies come from sequential IO.  Such IO tells us
 * nothing about whether it is being impacted by other IO, hence we ignore
 * it.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

/*
 * To implement hierarchical throttling, throtl_grps form a tree and bios
 * are dispatched upwards level by level until they reach the top and get
 * issued.  When dispatching bios from the children and local group at
 * each level, if the bios are dispatched into a single bio_list, there's
 * a risk that a local or child group which can queue many bios at once
 * fills up the list and starves the others.
 *
 * To avoid such starvation, dispatched bios are queued separately
 * according to where they came from.  When they are again dispatched to
 * the parent, they're popped in round-robin order so that no single source
 * hogs the dispatch window.
 *
 * throtl_qnode is used to keep the queued bios separated by their sources.
 * Bios are queued to throtl_qnode which in turn is queued to
 * throtl_service_queue and then dispatched in round-robin order.
 *
 * It's also used to track the reference counts on blkg's.  A qnode always
 * belongs to a throtl_grp and gets queued on itself or the parent, so
 * incrementing the reference of the associated throtl_grp when a qnode is
 * queued and decrementing when dequeued is enough to keep the whole blkg
 * tree pinned while bios are in flight.
 */
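/*
 * Added illustrative example (not in the original source): suppose qnode
 * A holds bios A1, A2, A3 and qnode B holds bio B1, queued in that order.
 * throtl_pop_queued() then yields A1, B1, A2, A3 -- a qnode that is still
 * non-empty after a pop is moved to the tail, so the sources alternate.
 */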
struct throtl_qnode {
	struct list_head	node;		/* service_queue->queued[] */
	struct bio_list		bios;		/* queued bios */
	struct throtl_grp	*tg;		/* tg this qnode belongs to */
};

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

enum {
	LIMIT_LOW,
	LIMIT_MAX,
	LIMIT_CNT,
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * qnode_on_self is used when bios are directly queued to this
	 * throtl_grp so that local bios compete fairly with bios
	 * dispatched from children.  qnode_on_parent is used when bios are
	 * dispatched from this throtl_grp into its parent and will compete
	 * with the sibling qnode_on_parents and the parent's
	 * qnode_on_self.
	 */
	struct throtl_qnode qnode_on_self[2];
	struct throtl_qnode qnode_on_parent[2];

	/*
	 * Dispatch time in jiffies.  This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios.  It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* are there any throtl rules between this group and td? */
	bool has_rules[2];

	/* internally used bytes per second rate limits */
	uint64_t bps[2][LIMIT_CNT];
	/* user configured bps limits */
	uint64_t bps_conf[2][LIMIT_CNT];

	/* internally used IOPS limits */
	unsigned int iops[2][LIMIT_CNT];
	/* user configured IOPS limits */
	unsigned int iops_conf[2][LIMIT_CNT];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	unsigned long last_low_overflow_time[2];

	uint64_t last_bytes_disp[2];
	unsigned int last_io_disp[2];

	unsigned long last_check_time;

	unsigned long latency_target; /* us */
	unsigned long latency_target_conf; /* us */
	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	unsigned long last_finish_time; /* ns / 1024 */
	unsigned long checked_last_finish_time; /* ns / 1024 */
	unsigned long avg_idletime; /* ns / 1024 */
	unsigned long idletime_threshold; /* us */
	unsigned long idletime_threshold_conf; /* us */

	unsigned int bio_cnt; /* total bios */
	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
	unsigned long bio_cnt_reset_time;
};

/* We measure latency for request sizes from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set.  This
 * scaling is used to make IO dispatch smoother.
 * Scale up: scale up linearly according to the time elapsed since the
 *	     upgrade.  For every throtl_slice, the limit scales up by 1/2
 *	     of the .low limit until it hits the .max limit.
 * Scale down: scale down exponentially if a cgroup doesn't hit its .low
 *	       limit.
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}
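/*
 * Added worked example (illustrative): with a .low limit of 10 MB/s and
 * two throtl_slice periods elapsed since the last upgrade, td->scale is 2
 * and the adjusted limit is 10 + 5 * 2 = 20 MB/s.  The callers below
 * clamp the result to the .max limit.
 */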

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}
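/*
 * Added note (commentary): while td is in LIMIT_LOW mode, a leaf cgroup
 * whose io.low has both bps and iops set to 0 still receives
 * MIN_THROTL_BPS / MIN_THROTL_IOPS above, so it is never starved outright;
 * an intermediate node with zero low limits is treated as unlimited.
 */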

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
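/*
 * Added worked example (illustrative): a 4k request is 8 sectors, so
 * order_base_2(8) - 3 = 0 selects the first bucket; a 1M request is 2048
 * sectors, so 11 - 3 = 8 selects the last of the 9 buckets.
 */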

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated.  See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued.  After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too.  If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, node);
	if (!tg)
		return NULL;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself.  e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to the previous limit might be stale.
	 * It's possible the cgroup's sleep time is very long while no other
	 * cgroup has IO running, so nobody would notice the limit change.
	 * Cap the sleep time to make sure the cgroup doesn't sleep too long
	 * and miss the notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child.  Returns %true if either timer
 * is armed or there's no pending child left.  %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true.  This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally.  Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	/*
	 * The previous slice has expired.  We must have trimmed it after
	 * the last bio dispatch, which means we never used the bandwidth
	 * since the start of the last slice.  Do try to make use of that
	 * bandwidth while giving credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed.  Don't try to trim the slice if the slice is used up.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched.  Also adjust slice_end.  It might
	 * happen that initially the cgroup limit was very low, resulting in
	 * a high slice_end, but later the limit was bumped up and the bio
	 * was dispatched sooner; then we need to reduce slice_end.  A high
	 * bogus slice_end is bad because it does not allow a new slice to
	 * start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is
	 * 1, so at most jiffy_elapsed_rnd is the equivalent of 1 second,
	 * because we allow dispatch after 1 second and by then the slice
	 * should have been trimmed.
	 */

	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
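/*
 * Added worked example (illustrative): with a 100 iops limit and
 * jiffy_elapsed_rnd equivalent to 200 ms, io_allowed is 100 * 0.2 = 20,
 * so the 21st bio of the slice has to wait for the next slice boundary.
 */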

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
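/*
 * Added worked example (illustrative): with a 1 MB/s bps limit and
 * jiffy_elapsed_rnd equivalent to 100 ms, bytes_allowed is ~100 KB.  A
 * 64 KB bio arriving with 80 KB already dispatched leaves extra_bytes =
 * 44 KB, giving a wait of roughly 44 ms plus the rounding slack.
 */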

/*
 * Returns whether one can dispatch a bio or not.  Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list.  So one should not be
	 * calling this function with a different bio if there are other
	 * bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg_bps_limit(tg, rw) == U64_MAX &&
	    tg_iops_limit(tg, rw) == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.  A new slice is started
	 * only for an empty throttle group.  If there is a queued bio, that
	 * means there should be an active slice and it should be extended
	 * instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued.  Set it
	 * when a bio is being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched.  Mark that @tg was empty.  This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}
Tejun Heo77216b02013-05-14 13:52:36 -07001123static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
Vivek Goyale43473b2010-09-15 17:06:35 -04001124{
Tejun Heo73f0d492013-05-14 13:52:35 -07001125 struct throtl_service_queue *sq = &tg->service_queue;
Tejun Heo6bc9c2b2013-05-14 13:52:38 -07001126 struct throtl_service_queue *parent_sq = sq->parent_sq;
1127 struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
Tejun Heoc5cc2072013-05-14 13:52:38 -07001128 struct throtl_grp *tg_to_put = NULL;
Vivek Goyale43473b2010-09-15 17:06:35 -04001129 struct bio *bio;
1130
Tejun Heoc5cc2072013-05-14 13:52:38 -07001131 /*
1132 * @bio is being transferred from @tg to @parent_sq. Popping a bio
1133 * from @tg may put its reference and @parent_sq might end up
1134 * getting released prematurely. Remember the tg to put and put it
1135 * after @bio is transferred to @parent_sq.
1136 */
1137 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
Tejun Heo73f0d492013-05-14 13:52:35 -07001138 sq->nr_queued[rw]--;
Vivek Goyale43473b2010-09-15 17:06:35 -04001139
1140 throtl_charge_bio(tg, bio);
Tejun Heo6bc9c2b2013-05-14 13:52:38 -07001141
1142 /*
1143 * If our parent is another tg, we just need to transfer @bio to
1144 * the parent using throtl_add_bio_tg(). If our parent is
1145 * @td->service_queue, @bio is ready to be issued. Put it on its
1146 * bio_lists[] and decrease total number queued. The caller is
1147 * responsible for issuing these bios.
1148 */
1149 if (parent_tg) {
Tejun Heoc5cc2072013-05-14 13:52:38 -07001150 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
Vivek Goyal32ee5bc2013-05-14 13:52:38 -07001151 start_parent_slice_with_credit(tg, parent_tg, rw);
Tejun Heo6bc9c2b2013-05-14 13:52:38 -07001152 } else {
Tejun Heoc5cc2072013-05-14 13:52:38 -07001153 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1154 &parent_sq->queued[rw]);
Tejun Heo6bc9c2b2013-05-14 13:52:38 -07001155 BUG_ON(tg->td->nr_queued[rw] <= 0);
1156 tg->td->nr_queued[rw]--;
1157 }
Vivek Goyale43473b2010-09-15 17:06:35 -04001158
Tejun Heo0f3457f2013-05-14 13:52:32 -07001159 throtl_trim_slice(tg, rw);
Tejun Heo6bc9c2b2013-05-14 13:52:38 -07001160
Tejun Heoc5cc2072013-05-14 13:52:38 -07001161 if (tg_to_put)
1162 blkg_put(tg_to_blkg(tg_to_put));
Vivek Goyale43473b2010-09-15 17:06:35 -04001163}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
1197
Tejun Heo651930b2013-05-14 13:52:35 -07001198static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
Vivek Goyale43473b2010-09-15 17:06:35 -04001199{
1200 unsigned int nr_disp = 0;
Vivek Goyale43473b2010-09-15 17:06:35 -04001201
1202 while (1) {
Tejun Heo73f0d492013-05-14 13:52:35 -07001203 struct throtl_grp *tg = throtl_rb_first(parent_sq);
Liu Bo2ab74cd2018-05-29 16:29:12 +08001204 struct throtl_service_queue *sq;
Vivek Goyale43473b2010-09-15 17:06:35 -04001205
1206 if (!tg)
1207 break;
1208
1209 if (time_before(jiffies, tg->disptime))
1210 break;
1211
Tejun Heo77216b02013-05-14 13:52:36 -07001212 throtl_dequeue_tg(tg);
Vivek Goyale43473b2010-09-15 17:06:35 -04001213
Tejun Heo77216b02013-05-14 13:52:36 -07001214 nr_disp += throtl_dispatch_tg(tg);
Vivek Goyale43473b2010-09-15 17:06:35 -04001215
Liu Bo2ab74cd2018-05-29 16:29:12 +08001216 sq = &tg->service_queue;
Tejun Heo73f0d492013-05-14 13:52:35 -07001217 if (sq->nr_queued[0] || sq->nr_queued[1])
Tejun Heo77216b02013-05-14 13:52:36 -07001218 tg_update_disptime(tg);
Vivek Goyale43473b2010-09-15 17:06:35 -04001219
1220 if (nr_disp >= throtl_quantum)
1221 break;
1222 }
1223
1224 return nr_disp;
1225}
1226
Shaohua Lic79892c2017-03-27 10:51:34 -07001227static bool throtl_can_upgrade(struct throtl_data *td,
1228 struct throtl_grp *this_tg);
Tejun Heo6e1a5702013-05-14 13:52:37 -07001229/**
1230 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1231 * @arg: the throtl_service_queue being serviced
1232 *
1233 * This timer is armed when a child throtl_grp with active bio's becomes
1234 * pending and queued on the service_queue's pending_tree and expires when
1235 * the first child throtl_grp should be dispatched. This function
Tejun Heo2e48a532013-05-14 13:52:38 -07001236 * dispatches bio's from the children throtl_grps to the parent
1237 * service_queue.
1238 *
1239 * If the parent's parent is another throtl_grp, dispatching is propagated
1240 * by either arming its pending_timer or repeating dispatch directly. If
1241 * the top-level service_queue is reached, throtl_data->dispatch_work is
1242 * kicked so that the ready bio's are issued.
Tejun Heo6e1a5702013-05-14 13:52:37 -07001243 */
Kees Cooke99e88a2017-10-16 14:43:17 -07001244static void throtl_pending_timer_fn(struct timer_list *t)
Tejun Heo69df0ab2013-05-14 13:52:36 -07001245{
Kees Cooke99e88a2017-10-16 14:43:17 -07001246 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
Tejun Heo2e48a532013-05-14 13:52:38 -07001247 struct throtl_grp *tg = sq_to_tg(sq);
Tejun Heo69df0ab2013-05-14 13:52:36 -07001248 struct throtl_data *td = sq_to_td(sq);
Tejun Heocb761992013-05-14 13:52:31 -07001249 struct request_queue *q = td->queue;
Tejun Heo2e48a532013-05-14 13:52:38 -07001250 struct throtl_service_queue *parent_sq;
1251 bool dispatched;
Tejun Heo6e1a5702013-05-14 13:52:37 -07001252 int ret;
Vivek Goyale43473b2010-09-15 17:06:35 -04001253
1254 spin_lock_irq(q->queue_lock);
Shaohua Lic79892c2017-03-27 10:51:34 -07001255 if (throtl_can_upgrade(td, NULL))
1256 throtl_upgrade_state(td);
1257
Tejun Heo2e48a532013-05-14 13:52:38 -07001258again:
1259 parent_sq = sq->parent_sq;
1260 dispatched = false;
Vivek Goyale43473b2010-09-15 17:06:35 -04001261
Tejun Heo7f52f982013-05-14 13:52:37 -07001262 while (true) {
1263 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
Tejun Heo2e48a532013-05-14 13:52:38 -07001264 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1265 sq->nr_queued[READ], sq->nr_queued[WRITE]);
Vivek Goyale43473b2010-09-15 17:06:35 -04001266
Tejun Heo7f52f982013-05-14 13:52:37 -07001267 ret = throtl_select_dispatch(sq);
1268 if (ret) {
Tejun Heo7f52f982013-05-14 13:52:37 -07001269 throtl_log(sq, "bios disp=%u", ret);
1270 dispatched = true;
Tejun Heo651930b2013-05-14 13:52:35 -07001271 }
Vivek Goyale43473b2010-09-15 17:06:35 -04001272
Tejun Heo7f52f982013-05-14 13:52:37 -07001273 if (throtl_schedule_next_dispatch(sq, false))
1274 break;
1275
1276 /* this dispatch window is still open, relax and repeat */
1277 spin_unlock_irq(q->queue_lock);
1278 cpu_relax();
1279 spin_lock_irq(q->queue_lock);
1280 }
Tejun Heo6a525602013-05-14 13:52:32 -07001281
Tejun Heo2e48a532013-05-14 13:52:38 -07001282 if (!dispatched)
1283 goto out_unlock;
Tejun Heo6e1a5702013-05-14 13:52:37 -07001284
Tejun Heo2e48a532013-05-14 13:52:38 -07001285 if (parent_sq) {
1286 /* @parent_sq is another throtl_grp, propagate dispatch */
1287 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1288 tg_update_disptime(tg);
1289 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1290 /* window is already open, repeat dispatching */
1291 sq = parent_sq;
1292 tg = sq_to_tg(sq);
1293 goto again;
1294 }
1295 }
1296 } else {
1297 /* reached the top-level, queue issuing */
1298 queue_work(kthrotld_workqueue, &td->dispatch_work);
1299 }
1300out_unlock:
Tejun Heo6e1a5702013-05-14 13:52:37 -07001301 spin_unlock_irq(q->queue_lock);
1302}
1303
1304/**
1305 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1306 * @work: work item being executed
1307 *
1308 * This function is queued for execution when bio's reach the bio_lists[]
1309 * of throtl_data->service_queue. Those bio's are ready and issued by this
1310 * function.
1311 */
Fabian Frederick8876e142014-04-17 21:41:16 +02001312static void blk_throtl_dispatch_work_fn(struct work_struct *work)
Tejun Heo6e1a5702013-05-14 13:52:37 -07001313{
1314 struct throtl_data *td = container_of(work, struct throtl_data,
1315 dispatch_work);
1316 struct throtl_service_queue *td_sq = &td->service_queue;
1317 struct request_queue *q = td->queue;
1318 struct bio_list bio_list_on_stack;
1319 struct bio *bio;
1320 struct blk_plug plug;
1321 int rw;
1322
1323 bio_list_init(&bio_list_on_stack);
1324
1325 spin_lock_irq(q->queue_lock);
Tejun Heoc5cc2072013-05-14 13:52:38 -07001326 for (rw = READ; rw <= WRITE; rw++)
1327 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1328 bio_list_add(&bio_list_on_stack, bio);
Vivek Goyale43473b2010-09-15 17:06:35 -04001329 spin_unlock_irq(q->queue_lock);
1330
Tejun Heo6e1a5702013-05-14 13:52:37 -07001331 if (!bio_list_empty(&bio_list_on_stack)) {
Vivek Goyal69d60eb2011-03-09 08:27:37 +01001332 blk_start_plug(&plug);
Vivek Goyale43473b2010-09-15 17:06:35 -04001333 while ((bio = bio_list_pop(&bio_list_on_stack)))
1334 generic_make_request(bio);
Vivek Goyal69d60eb2011-03-09 08:27:37 +01001335 blk_finish_plug(&plug);
Vivek Goyale43473b2010-09-15 17:06:35 -04001336 }
Vivek Goyale43473b2010-09-15 17:06:35 -04001337}
1338
Tejun Heof95a04a2012-04-16 13:57:26 -07001339static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1340 int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001341{
Tejun Heof95a04a2012-04-16 13:57:26 -07001342 struct throtl_grp *tg = pd_to_tg(pd);
1343 u64 v = *(u64 *)((void *)tg + off);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001344
Shaohua Li2ab54922017-03-27 10:51:29 -07001345 if (v == U64_MAX)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001346 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001347 return __blkg_prfill_u64(sf, pd, v);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001348}
1349
Tejun Heof95a04a2012-04-16 13:57:26 -07001350static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1351 int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001352{
Tejun Heof95a04a2012-04-16 13:57:26 -07001353 struct throtl_grp *tg = pd_to_tg(pd);
1354 unsigned int v = *(unsigned int *)((void *)tg + off);
Tejun Heoaf133ce2012-04-01 14:38:44 -07001355
Shaohua Li2ab54922017-03-27 10:51:29 -07001356 if (v == UINT_MAX)
Tejun Heoaf133ce2012-04-01 14:38:44 -07001357 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001358 return __blkg_prfill_u64(sf, pd, v);
Tejun Heoaf133ce2012-04-01 14:38:44 -07001359}
1360
Tejun Heo2da8ca82013-12-05 12:28:04 -05001361static int tg_print_conf_u64(struct seq_file *sf, void *v)
Tejun Heoaf133ce2012-04-01 14:38:44 -07001362{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001363 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1364 &blkcg_policy_throtl, seq_cft(sf)->private, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001365 return 0;
1366}
1367
Tejun Heo2da8ca82013-12-05 12:28:04 -05001368static int tg_print_conf_uint(struct seq_file *sf, void *v)
Vivek Goyale43473b2010-09-15 17:06:35 -04001369{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001370 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1371 &blkcg_policy_throtl, seq_cft(sf)->private, false);
Tejun Heoaf133ce2012-04-01 14:38:44 -07001372 return 0;
Vivek Goyale43473b2010-09-15 17:06:35 -04001373}
1374
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001375static void tg_conf_updated(struct throtl_grp *tg, bool global)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001376{
Tejun Heo69948b02015-08-18 14:55:32 -07001377 struct throtl_service_queue *sq = &tg->service_queue;
Tejun Heo492eb212013-08-08 20:11:25 -04001378 struct cgroup_subsys_state *pos_css;
Tejun Heo69948b02015-08-18 14:55:32 -07001379 struct blkcg_gq *blkg;
Tejun Heoaf133ce2012-04-01 14:38:44 -07001380
Tejun Heofda6f272013-05-14 13:52:36 -07001381 throtl_log(&tg->service_queue,
1382 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
Shaohua Li9f626e32017-03-27 10:51:30 -07001383 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1384 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
Tejun Heo632b4492013-05-14 13:52:31 -07001385
1386 /*
Tejun Heo693e7512013-05-14 13:52:38 -07001387 * Update has_rules[] flags for the updated tg's subtree. A tg is
1388 * considered to have rules if either the tg itself or any of its
1389 * ancestors has rules. This identifies groups without any
1390 * restrictions in the whole hierarchy and allows them to bypass
1391 * blk-throttle.
1392 */
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001393 blkg_for_each_descendant_pre(blkg, pos_css,
1394 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
Shaohua Li5b81fc32017-05-17 13:07:24 -07001395 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1396 struct throtl_grp *parent_tg;
1397
1398 tg_update_has_rules(this_tg);
1399 /* ignore root/second level */
1400 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1401 !blkg->parent->parent)
1402 continue;
1403 parent_tg = blkg_to_tg(blkg->parent);
1404 /*
1405 * make sure all children have a lower idle time threshold and
1406 * a higher latency target
1407 */
1408 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1409 parent_tg->idletime_threshold);
1410 this_tg->latency_target = max(this_tg->latency_target,
1411 parent_tg->latency_target);
1412 }
Tejun Heo693e7512013-05-14 13:52:38 -07001413
1414 /*
Tejun Heo632b4492013-05-14 13:52:31 -07001415 * We're already holding queue_lock and know @tg is valid. Let's
1416 * apply the new config directly.
1417 *
1418 * Restart the slices for both READ and WRITE. It might happen
1419 * that a group's limits are dropped suddenly and we don't want to
1420 * account recently dispatched IO at the new low rate.
1421 */
Tejun Heo0f3457f2013-05-14 13:52:32 -07001422 throtl_start_new_slice(tg, 0);
1423 throtl_start_new_slice(tg, 1);
Tejun Heo632b4492013-05-14 13:52:31 -07001424
Tejun Heo5b2c16a2013-05-14 13:52:32 -07001425 if (tg->flags & THROTL_TG_PENDING) {
Tejun Heo77216b02013-05-14 13:52:36 -07001426 tg_update_disptime(tg);
Tejun Heo7f52f982013-05-14 13:52:37 -07001427 throtl_schedule_next_dispatch(sq->parent_sq, true);
Tejun Heo632b4492013-05-14 13:52:31 -07001428 }
Tejun Heo69948b02015-08-18 14:55:32 -07001429}
Tejun Heo60c2bc22012-04-01 14:38:43 -07001430
Tejun Heo69948b02015-08-18 14:55:32 -07001431static ssize_t tg_set_conf(struct kernfs_open_file *of,
1432 char *buf, size_t nbytes, loff_t off, bool is_u64)
1433{
1434 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1435 struct blkg_conf_ctx ctx;
1436 struct throtl_grp *tg;
1437 int ret;
1438 u64 v;
1439
1440 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1441 if (ret)
1442 return ret;
1443
1444 ret = -EINVAL;
1445 if (sscanf(ctx.body, "%llu", &v) != 1)
1446 goto out_finish;
1447 if (!v)
Shaohua Li2ab54922017-03-27 10:51:29 -07001448 v = U64_MAX;
Tejun Heo69948b02015-08-18 14:55:32 -07001449
1450 tg = blkg_to_tg(ctx.blkg);
1451
1452 if (is_u64)
1453 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1454 else
1455 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1456
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001457 tg_conf_updated(tg, false);
Tejun Heo36aa9e52015-08-18 14:55:31 -07001458 ret = 0;
1459out_finish:
Tejun Heo60c2bc22012-04-01 14:38:43 -07001460 blkg_conf_finish(&ctx);
Tejun Heo36aa9e52015-08-18 14:55:31 -07001461 return ret ?: nbytes;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001462}
1463
Tejun Heo451af502014-05-13 12:16:21 -04001464static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1465 char *buf, size_t nbytes, loff_t off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001466{
Tejun Heo451af502014-05-13 12:16:21 -04001467 return tg_set_conf(of, buf, nbytes, off, true);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001468}
1469
Tejun Heo451af502014-05-13 12:16:21 -04001470static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1471 char *buf, size_t nbytes, loff_t off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001472{
Tejun Heo451af502014-05-13 12:16:21 -04001473 return tg_set_conf(of, buf, nbytes, off, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001474}
1475
Tejun Heo880f50e2015-08-18 14:55:30 -07001476static struct cftype throtl_legacy_files[] = {
Tejun Heo60c2bc22012-04-01 14:38:43 -07001477 {
1478 .name = "throttle.read_bps_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001479 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001480 .seq_show = tg_print_conf_u64,
Tejun Heo451af502014-05-13 12:16:21 -04001481 .write = tg_set_conf_u64,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001482 },
1483 {
1484 .name = "throttle.write_bps_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001485 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001486 .seq_show = tg_print_conf_u64,
Tejun Heo451af502014-05-13 12:16:21 -04001487 .write = tg_set_conf_u64,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001488 },
1489 {
1490 .name = "throttle.read_iops_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001491 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001492 .seq_show = tg_print_conf_uint,
Tejun Heo451af502014-05-13 12:16:21 -04001493 .write = tg_set_conf_uint,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001494 },
1495 {
1496 .name = "throttle.write_iops_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001497 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001498 .seq_show = tg_print_conf_uint,
Tejun Heo451af502014-05-13 12:16:21 -04001499 .write = tg_set_conf_uint,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001500 },
1501 {
1502 .name = "throttle.io_service_bytes",
Tejun Heo77ea7332015-08-18 14:55:24 -07001503 .private = (unsigned long)&blkcg_policy_throtl,
1504 .seq_show = blkg_print_stat_bytes,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001505 },
1506 {
weiping zhang17534c62017-12-11 22:56:25 +08001507 .name = "throttle.io_service_bytes_recursive",
1508 .private = (unsigned long)&blkcg_policy_throtl,
1509 .seq_show = blkg_print_stat_bytes_recursive,
1510 },
1511 {
Tejun Heo60c2bc22012-04-01 14:38:43 -07001512 .name = "throttle.io_serviced",
Tejun Heo77ea7332015-08-18 14:55:24 -07001513 .private = (unsigned long)&blkcg_policy_throtl,
1514 .seq_show = blkg_print_stat_ios,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001515 },
weiping zhang17534c62017-12-11 22:56:25 +08001516 {
1517 .name = "throttle.io_serviced_recursive",
1518 .private = (unsigned long)&blkcg_policy_throtl,
1519 .seq_show = blkg_print_stat_ios_recursive,
1520 },
Tejun Heo60c2bc22012-04-01 14:38:43 -07001521 { } /* terminate */
1522};
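/*
 * Illustrative usage of the legacy (cgroup v1) files above, assuming a
 * device with major:minor 8:16 and a 1MiB/s read cap:
 *
 *   echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * Writing 0 removes a limit; tg_set_conf() maps it back to "unlimited".
 */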
1523
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001524static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001525 int off)
1526{
1527 struct throtl_grp *tg = pd_to_tg(pd);
1528 const char *dname = blkg_dev_name(pd->blkg);
1529 char bufs[4][21] = { "max", "max", "max", "max" };
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001530 u64 bps_dft;
1531 unsigned int iops_dft;
Shaohua Liada75b62017-03-27 10:51:42 -07001532 char idle_time[26] = "";
Shaohua Liec809912017-03-27 10:51:44 -07001533 char latency_time[26] = "";
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001534
1535 if (!dname)
1536 return 0;
Shaohua Li9f626e32017-03-27 10:51:30 -07001537
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001538 if (off == LIMIT_LOW) {
1539 bps_dft = 0;
1540 iops_dft = 0;
1541 } else {
1542 bps_dft = U64_MAX;
1543 iops_dft = UINT_MAX;
1544 }
1545
1546 if (tg->bps_conf[READ][off] == bps_dft &&
1547 tg->bps_conf[WRITE][off] == bps_dft &&
1548 tg->iops_conf[READ][off] == iops_dft &&
Shaohua Liada75b62017-03-27 10:51:42 -07001549 tg->iops_conf[WRITE][off] == iops_dft &&
Shaohua Liec809912017-03-27 10:51:44 -07001550 (off != LIMIT_LOW ||
Shaohua Lib4f428e2017-05-17 13:07:27 -07001551 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
Shaohua Li5b81fc32017-05-17 13:07:24 -07001552 tg->latency_target_conf == DFL_LATENCY_TARGET)))
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001553 return 0;
1554
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001555 if (tg->bps_conf[READ][off] != U64_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001556 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001557 tg->bps_conf[READ][off]);
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001558 if (tg->bps_conf[WRITE][off] != U64_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001559 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001560 tg->bps_conf[WRITE][off]);
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001561 if (tg->iops_conf[READ][off] != UINT_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001562 snprintf(bufs[2], sizeof(bufs[2]), "%u",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001563 tg->iops_conf[READ][off]);
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001564 if (tg->iops_conf[WRITE][off] != UINT_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001565 snprintf(bufs[3], sizeof(bufs[3]), "%u",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001566 tg->iops_conf[WRITE][off]);
Shaohua Liada75b62017-03-27 10:51:42 -07001567 if (off == LIMIT_LOW) {
Shaohua Li5b81fc32017-05-17 13:07:24 -07001568 if (tg->idletime_threshold_conf == ULONG_MAX)
Shaohua Liada75b62017-03-27 10:51:42 -07001569 strcpy(idle_time, " idle=max");
1570 else
1571 snprintf(idle_time, sizeof(idle_time), " idle=%lu",
Shaohua Li5b81fc32017-05-17 13:07:24 -07001572 tg->idletime_threshold_conf);
Shaohua Liec809912017-03-27 10:51:44 -07001573
Shaohua Li5b81fc32017-05-17 13:07:24 -07001574 if (tg->latency_target_conf == ULONG_MAX)
Shaohua Liec809912017-03-27 10:51:44 -07001575 strcpy(latency_time, " latency=max");
1576 else
1577 snprintf(latency_time, sizeof(latency_time),
Shaohua Li5b81fc32017-05-17 13:07:24 -07001578 " latency=%lu", tg->latency_target_conf);
Shaohua Liada75b62017-03-27 10:51:42 -07001579 }
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001580
Shaohua Liec809912017-03-27 10:51:44 -07001581 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1582 dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1583 latency_time);
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001584 return 0;
1585}
1586
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001587static int tg_print_limit(struct seq_file *sf, void *v)
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001588{
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001589 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001590 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1591 return 0;
1592}
1593
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001594static ssize_t tg_set_limit(struct kernfs_open_file *of,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001595 char *buf, size_t nbytes, loff_t off)
1596{
1597 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1598 struct blkg_conf_ctx ctx;
1599 struct throtl_grp *tg;
1600 u64 v[4];
Shaohua Liada75b62017-03-27 10:51:42 -07001601 unsigned long idle_time;
Shaohua Liec809912017-03-27 10:51:44 -07001602 unsigned long latency_time;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001603 int ret;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001604 int index = of_cft(of)->private;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001605
1606 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1607 if (ret)
1608 return ret;
1609
1610 tg = blkg_to_tg(ctx.blkg);
1611
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001612 v[0] = tg->bps_conf[READ][index];
1613 v[1] = tg->bps_conf[WRITE][index];
1614 v[2] = tg->iops_conf[READ][index];
1615 v[3] = tg->iops_conf[WRITE][index];
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001616
Shaohua Li5b81fc32017-05-17 13:07:24 -07001617 idle_time = tg->idletime_threshold_conf;
1618 latency_time = tg->latency_target_conf;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001619 while (true) {
1620 char tok[27]; /* wiops=18446744073709551616 */
1621 char *p;
Shaohua Li2ab54922017-03-27 10:51:29 -07001622 u64 val = U64_MAX;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001623 int len;
1624
1625 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1626 break;
1627 if (tok[0] == '\0')
1628 break;
1629 ctx.body += len;
1630
1631 ret = -EINVAL;
1632 p = tok;
1633 strsep(&p, "=");
1634 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1635 goto out_finish;
1636
1637 ret = -ERANGE;
1638 if (!val)
1639 goto out_finish;
1640
1641 ret = -EINVAL;
1642 if (!strcmp(tok, "rbps"))
1643 v[0] = val;
1644 else if (!strcmp(tok, "wbps"))
1645 v[1] = val;
1646 else if (!strcmp(tok, "riops"))
1647 v[2] = min_t(u64, val, UINT_MAX);
1648 else if (!strcmp(tok, "wiops"))
1649 v[3] = min_t(u64, val, UINT_MAX);
Shaohua Liada75b62017-03-27 10:51:42 -07001650 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1651 idle_time = val;
Shaohua Liec809912017-03-27 10:51:44 -07001652 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1653 latency_time = val;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001654 else
1655 goto out_finish;
1656 }
1657
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001658 tg->bps_conf[READ][index] = v[0];
1659 tg->bps_conf[WRITE][index] = v[1];
1660 tg->iops_conf[READ][index] = v[2];
1661 tg->iops_conf[WRITE][index] = v[3];
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001662
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001663 if (index == LIMIT_MAX) {
1664 tg->bps[READ][index] = v[0];
1665 tg->bps[WRITE][index] = v[1];
1666 tg->iops[READ][index] = v[2];
1667 tg->iops[WRITE][index] = v[3];
1668 }
1669 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1670 tg->bps_conf[READ][LIMIT_MAX]);
1671 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1672 tg->bps_conf[WRITE][LIMIT_MAX]);
1673 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1674 tg->iops_conf[READ][LIMIT_MAX]);
1675 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1676 tg->iops_conf[WRITE][LIMIT_MAX]);
Shaohua Lib4f428e2017-05-17 13:07:27 -07001677 tg->idletime_threshold_conf = idle_time;
1678 tg->latency_target_conf = latency_time;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001679
Shaohua Lib4f428e2017-05-17 13:07:27 -07001680 /* force user to configure all settings for low limit */
1681 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1682 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1683 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1684 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1685 tg->bps[READ][LIMIT_LOW] = 0;
1686 tg->bps[WRITE][LIMIT_LOW] = 0;
1687 tg->iops[READ][LIMIT_LOW] = 0;
1688 tg->iops[WRITE][LIMIT_LOW] = 0;
1689 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1690 tg->latency_target = DFL_LATENCY_TARGET;
1691 } else if (index == LIMIT_LOW) {
Shaohua Li5b81fc32017-05-17 13:07:24 -07001692 tg->idletime_threshold = tg->idletime_threshold_conf;
Shaohua Li5b81fc32017-05-17 13:07:24 -07001693 tg->latency_target = tg->latency_target_conf;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001694 }
Shaohua Lib4f428e2017-05-17 13:07:27 -07001695
1696 blk_throtl_update_limit_valid(tg->td);
1697 if (tg->td->limit_valid[LIMIT_LOW]) {
1698 if (index == LIMIT_LOW)
1699 tg->td->limit_index = LIMIT_LOW;
1700 } else
1701 tg->td->limit_index = LIMIT_MAX;
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001702 tg_conf_updated(tg, index == LIMIT_LOW &&
1703 tg->td->limit_valid[LIMIT_LOW]);
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001704 ret = 0;
1705out_finish:
1706 blkg_conf_finish(&ctx);
1707 return ret ?: nbytes;
1708}
1709
1710static struct cftype throtl_files[] = {
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001711#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1712 {
1713 .name = "low",
1714 .flags = CFTYPE_NOT_ON_ROOT,
1715 .seq_show = tg_print_limit,
1716 .write = tg_set_limit,
1717 .private = LIMIT_LOW,
1718 },
1719#endif
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001720 {
1721 .name = "max",
1722 .flags = CFTYPE_NOT_ON_ROOT,
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001723 .seq_show = tg_print_limit,
1724 .write = tg_set_limit,
1725 .private = LIMIT_MAX,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001726 },
1727 { } /* terminate */
1728};
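/*
 * Illustrative usage of the cgroup v2 files above (the 8:16 device
 * numbers are just an example):
 *
 *   echo "8:16 rbps=2097152 wiops=120" > io.max
 *   echo "8:16 rbps=1048576 idle=1000 latency=4000" > io.low
 *
 * Keys omitted from a write keep their previous values; writing "max"
 * clears a limit (see tg_set_limit() above).
 */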
1729
Vivek Goyalda527772011-03-02 19:05:33 -05001730static void throtl_shutdown_wq(struct request_queue *q)
Vivek Goyale43473b2010-09-15 17:06:35 -04001731{
1732 struct throtl_data *td = q->td;
1733
Tejun Heo69df0ab2013-05-14 13:52:36 -07001734 cancel_work_sync(&td->dispatch_work);
Vivek Goyale43473b2010-09-15 17:06:35 -04001735}
1736
Tejun Heo3c798392012-04-16 13:57:25 -07001737static struct blkcg_policy blkcg_policy_throtl = {
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001738 .dfl_cftypes = throtl_files,
Tejun Heo880f50e2015-08-18 14:55:30 -07001739 .legacy_cftypes = throtl_legacy_files,
Tejun Heof9fcc2d2012-04-16 13:57:27 -07001740
Tejun Heo001bea72015-08-18 14:55:11 -07001741 .pd_alloc_fn = throtl_pd_alloc,
Tejun Heof9fcc2d2012-04-16 13:57:27 -07001742 .pd_init_fn = throtl_pd_init,
Tejun Heo693e7512013-05-14 13:52:38 -07001743 .pd_online_fn = throtl_pd_online,
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001744 .pd_offline_fn = throtl_pd_offline,
Tejun Heo001bea72015-08-18 14:55:11 -07001745 .pd_free_fn = throtl_pd_free,
Vivek Goyale43473b2010-09-15 17:06:35 -04001746};
1747
Shaohua Li3f0abd82017-03-27 10:51:35 -07001748static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1749{
1750 unsigned long rtime = jiffies, wtime = jiffies;
1751
1752 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1753 rtime = tg->last_low_overflow_time[READ];
1754 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1755 wtime = tg->last_low_overflow_time[WRITE];
1756 return min(rtime, wtime);
1757}
1758
1759/* tg should not be an intermediate node */
1760static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1761{
1762 struct throtl_service_queue *parent_sq;
1763 struct throtl_grp *parent = tg;
1764 unsigned long ret = __tg_last_low_overflow_time(tg);
1765
1766 while (true) {
1767 parent_sq = parent->service_queue.parent_sq;
1768 parent = sq_to_tg(parent_sq);
1769 if (!parent)
1770 break;
1771
1772 /*
1773 * The parent doesn't have a low limit, so it always reaches the
1774 * low limit. Its overflow time is useless for its children.
1775 */
1776 if (!parent->bps[READ][LIMIT_LOW] &&
1777 !parent->iops[READ][LIMIT_LOW] &&
1778 !parent->bps[WRITE][LIMIT_LOW] &&
1779 !parent->iops[WRITE][LIMIT_LOW])
1780 continue;
1781 if (time_after(__tg_last_low_overflow_time(parent), ret))
1782 ret = __tg_last_low_overflow_time(parent);
1783 }
1784 return ret;
1785}
1786
Shaohua Li9e234ee2017-03-27 10:51:41 -07001787static bool throtl_tg_is_idle(struct throtl_grp *tg)
1788{
1789 /*
1790 * cgroup is idle if:
1791 * - single idle is too long, longer than a fixed value (in case the user
Shaohua Lib4f428e2017-05-17 13:07:27 -07001792 * configures too big a threshold) or 4 times the idletime threshold
Shaohua Li9e234ee2017-03-27 10:51:41 -07001793 * - average think time is more than the threshold
Shaohua Li53696b82017-03-27 15:19:43 -07001794 * - IO latency is largely below threshold
Shaohua Li9e234ee2017-03-27 10:51:41 -07001795 */
Shaohua Lib4f428e2017-05-17 13:07:27 -07001796 unsigned long time;
Shaohua Li4cff7292017-05-17 13:07:25 -07001797 bool ret;
Shaohua Li9e234ee2017-03-27 10:51:41 -07001798
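/*
 * e.g. with idletime_threshold = 1000us the single-idle check below is
 * capped at min(MAX_IDLE_TIME, 4 * 1000us) = 4ms
 */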
Shaohua Lib4f428e2017-05-17 13:07:27 -07001799 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1800 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1801 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1802 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1803 tg->avg_idletime > tg->idletime_threshold ||
1804 (tg->latency_target && tg->bio_cnt &&
Shaohua Li53696b82017-03-27 15:19:43 -07001805 tg->bad_bio_cnt * 5 < tg->bio_cnt);
Shaohua Li4cff7292017-05-17 13:07:25 -07001806 throtl_log(&tg->service_queue,
1807 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1808 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1809 tg->bio_cnt, ret, tg->td->scale);
1810 return ret;
Shaohua Li9e234ee2017-03-27 10:51:41 -07001811}
1812
Shaohua Lic79892c2017-03-27 10:51:34 -07001813static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1814{
1815 struct throtl_service_queue *sq = &tg->service_queue;
1816 bool read_limit, write_limit;
1817
1818 /*
1819 * if the cgroup reaches its low limit (a low limit of 0 is always
1820 * considered reached), it's ok to upgrade to the next limit
1821 */
1822 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1823 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1824 if (!read_limit && !write_limit)
1825 return true;
1826 if (read_limit && sq->nr_queued[READ] &&
1827 (!write_limit || sq->nr_queued[WRITE]))
1828 return true;
1829 if (write_limit && sq->nr_queued[WRITE] &&
1830 (!read_limit || sq->nr_queued[READ]))
1831 return true;
Shaohua Liaec24242017-03-27 10:51:39 -07001832
1833 if (time_after_eq(jiffies,
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07001834 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1835 throtl_tg_is_idle(tg))
Shaohua Liaec24242017-03-27 10:51:39 -07001836 return true;
Shaohua Lic79892c2017-03-27 10:51:34 -07001837 return false;
1838}
1839
1840static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1841{
1842 while (true) {
1843 if (throtl_tg_can_upgrade(tg))
1844 return true;
1845 tg = sq_to_tg(tg->service_queue.parent_sq);
1846 if (!tg || !tg_to_blkg(tg)->parent)
1847 return false;
1848 }
1849 return false;
1850}
1851
1852static bool throtl_can_upgrade(struct throtl_data *td,
1853 struct throtl_grp *this_tg)
1854{
1855 struct cgroup_subsys_state *pos_css;
1856 struct blkcg_gq *blkg;
1857
1858 if (td->limit_index != LIMIT_LOW)
1859 return false;
1860
Shaohua Li297e3d82017-03-27 10:51:37 -07001861 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001862 return false;
1863
Shaohua Lic79892c2017-03-27 10:51:34 -07001864 rcu_read_lock();
1865 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1866 struct throtl_grp *tg = blkg_to_tg(blkg);
1867
1868 if (tg == this_tg)
1869 continue;
1870 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1871 continue;
1872 if (!throtl_hierarchy_can_upgrade(tg)) {
1873 rcu_read_unlock();
1874 return false;
1875 }
1876 }
1877 rcu_read_unlock();
1878 return true;
1879}
1880
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07001881static void throtl_upgrade_check(struct throtl_grp *tg)
1882{
1883 unsigned long now = jiffies;
1884
1885 if (tg->td->limit_index != LIMIT_LOW)
1886 return;
1887
1888 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1889 return;
1890
1891 tg->last_check_time = now;
1892
1893 if (!time_after_eq(now,
1894 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1895 return;
1896
1897 if (throtl_can_upgrade(tg->td, NULL))
1898 throtl_upgrade_state(tg->td);
1899}
1900
Shaohua Lic79892c2017-03-27 10:51:34 -07001901static void throtl_upgrade_state(struct throtl_data *td)
1902{
1903 struct cgroup_subsys_state *pos_css;
1904 struct blkcg_gq *blkg;
1905
Shaohua Li4cff7292017-05-17 13:07:25 -07001906 throtl_log(&td->service_queue, "upgrade to max");
Shaohua Lic79892c2017-03-27 10:51:34 -07001907 td->limit_index = LIMIT_MAX;
Shaohua Li3f0abd82017-03-27 10:51:35 -07001908 td->low_upgrade_time = jiffies;
Shaohua Li7394e312017-03-27 10:51:40 -07001909 td->scale = 0;
Shaohua Lic79892c2017-03-27 10:51:34 -07001910 rcu_read_lock();
1911 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1912 struct throtl_grp *tg = blkg_to_tg(blkg);
1913 struct throtl_service_queue *sq = &tg->service_queue;
1914
1915 tg->disptime = jiffies - 1;
1916 throtl_select_dispatch(sq);
Joseph Qi4f02fb72017-09-30 14:38:49 +08001917 throtl_schedule_next_dispatch(sq, true);
Shaohua Lic79892c2017-03-27 10:51:34 -07001918 }
1919 rcu_read_unlock();
1920 throtl_select_dispatch(&td->service_queue);
Joseph Qi4f02fb72017-09-30 14:38:49 +08001921 throtl_schedule_next_dispatch(&td->service_queue, true);
Shaohua Lic79892c2017-03-27 10:51:34 -07001922 queue_work(kthrotld_workqueue, &td->dispatch_work);
1923}
1924
Shaohua Li3f0abd82017-03-27 10:51:35 -07001925static void throtl_downgrade_state(struct throtl_data *td, int new)
1926{
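/*
 * Halve the upgrade scale on every downgrade attempt; we only fall
 * back to the low limit once the scale reaches 0 below.
 */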
Shaohua Li7394e312017-03-27 10:51:40 -07001927 td->scale /= 2;
1928
Shaohua Li4cff7292017-05-17 13:07:25 -07001929 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
Shaohua Li7394e312017-03-27 10:51:40 -07001930 if (td->scale) {
1931 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1932 return;
1933 }
1934
Shaohua Li3f0abd82017-03-27 10:51:35 -07001935 td->limit_index = new;
1936 td->low_downgrade_time = jiffies;
1937}
1938
1939static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1940{
1941 struct throtl_data *td = tg->td;
1942 unsigned long now = jiffies;
1943
1944 /*
1945 * If the cgroup is below its low limit, consider downgrading so
1946 * that other cgroups get throttled
1947 */
Shaohua Li297e3d82017-03-27 10:51:37 -07001948 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1949 time_after_eq(now, tg_last_low_overflow_time(tg) +
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07001950 td->throtl_slice) &&
1951 (!throtl_tg_is_idle(tg) ||
1952 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001953 return true;
1954 return false;
1955}
1956
1957static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1958{
1959 while (true) {
1960 if (!throtl_tg_can_downgrade(tg))
1961 return false;
1962 tg = sq_to_tg(tg->service_queue.parent_sq);
1963 if (!tg || !tg_to_blkg(tg)->parent)
1964 break;
1965 }
1966 return true;
1967}
1968
1969static void throtl_downgrade_check(struct throtl_grp *tg)
1970{
1971 uint64_t bps;
1972 unsigned int iops;
1973 unsigned long elapsed_time;
1974 unsigned long now = jiffies;
1975
1976 if (tg->td->limit_index != LIMIT_MAX ||
1977 !tg->td->limit_valid[LIMIT_LOW])
1978 return;
1979 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1980 return;
Shaohua Li297e3d82017-03-27 10:51:37 -07001981 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001982 return;
1983
1984 elapsed_time = now - tg->last_check_time;
1985 tg->last_check_time = now;
1986
Shaohua Li297e3d82017-03-27 10:51:37 -07001987 if (time_before(now, tg_last_low_overflow_time(tg) +
1988 tg->td->throtl_slice))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001989 return;
1990
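/*
 * Rate over the check interval: bytes * HZ / elapsed jiffies yields
 * bytes/sec, e.g. 4MiB dispatched over 100 jiffies at HZ=1000 is
 * ~40MiB/s.
 */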
1991 if (tg->bps[READ][LIMIT_LOW]) {
1992 bps = tg->last_bytes_disp[READ] * HZ;
1993 do_div(bps, elapsed_time);
1994 if (bps >= tg->bps[READ][LIMIT_LOW])
1995 tg->last_low_overflow_time[READ] = now;
1996 }
1997
1998 if (tg->bps[WRITE][LIMIT_LOW]) {
1999 bps = tg->last_bytes_disp[WRITE] * HZ;
2000 do_div(bps, elapsed_time);
2001 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2002 tg->last_low_overflow_time[WRITE] = now;
2003 }
2004
2005 if (tg->iops[READ][LIMIT_LOW]) {
2006 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2007 if (iops >= tg->iops[READ][LIMIT_LOW])
2008 tg->last_low_overflow_time[READ] = now;
2009 }
2010
2011 if (tg->iops[WRITE][LIMIT_LOW]) {
2012 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2013 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2014 tg->last_low_overflow_time[WRITE] = now;
2015 }
2016
2017 /*
2018 * If the cgroup is below its low limit, consider downgrading so
2019 * that other cgroups get throttled
2020 */
2021 if (throtl_hierarchy_can_downgrade(tg))
2022 throtl_downgrade_state(tg->td, LIMIT_LOW);
2023
2024 tg->last_bytes_disp[READ] = 0;
2025 tg->last_bytes_disp[WRITE] = 0;
2026 tg->last_io_disp[READ] = 0;
2027 tg->last_io_disp[WRITE] = 0;
2028}
2029
Shaohua Li9e234ee2017-03-27 10:51:41 -07002030static void blk_throtl_update_idletime(struct throtl_grp *tg)
2031{
2032 unsigned long now = ktime_get_ns() >> 10;
2033 unsigned long last_finish_time = tg->last_finish_time;
2034
2035 if (now <= last_finish_time || last_finish_time == 0 ||
2036 last_finish_time == tg->checked_last_finish_time)
2037 return;
2038
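/*
 * 7/8 EWMA: avg = (7 * avg + sample) / 8; e.g. an 800us average and a
 * 1600us idle sample give (7 * 800 + 1600) / 8 = 900us.
 */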
2039 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2040 tg->checked_last_finish_time = last_finish_time;
2041}
2042
Shaohua Lib9147dd2017-03-27 15:19:42 -07002043#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2044static void throtl_update_latency_buckets(struct throtl_data *td)
2045{
Joseph Qib889bf62017-11-21 09:38:30 +08002046 struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2047 int i, cpu, rw;
2048 unsigned long last_latency[2] = { 0 };
2049 unsigned long latency[2];
Shaohua Lib9147dd2017-03-27 15:19:42 -07002050
2051 if (!blk_queue_nonrot(td->queue))
2052 return;
2053 if (time_before(jiffies, td->last_calculate_time + HZ))
2054 return;
2055 td->last_calculate_time = jiffies;
2056
2057 memset(avg_latency, 0, sizeof(avg_latency));
Joseph Qib889bf62017-11-21 09:38:30 +08002058 for (rw = READ; rw <= WRITE; rw++) {
2059 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2060 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
Shaohua Lib9147dd2017-03-27 15:19:42 -07002061
Joseph Qib889bf62017-11-21 09:38:30 +08002062 for_each_possible_cpu(cpu) {
2063 struct latency_bucket *bucket;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002064
Joseph Qib889bf62017-11-21 09:38:30 +08002065 /* this isn't race free, but ok in practice */
2066 bucket = per_cpu_ptr(td->latency_buckets[rw],
2067 cpu);
2068 tmp->total_latency += bucket[i].total_latency;
2069 tmp->samples += bucket[i].samples;
2070 bucket[i].total_latency = 0;
2071 bucket[i].samples = 0;
2072 }
Shaohua Lib9147dd2017-03-27 15:19:42 -07002073
Joseph Qib889bf62017-11-21 09:38:30 +08002074 if (tmp->samples >= 32) {
2075 int samples = tmp->samples;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002076
Joseph Qib889bf62017-11-21 09:38:30 +08002077 latency[rw] = tmp->total_latency;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002078
Joseph Qib889bf62017-11-21 09:38:30 +08002079 tmp->total_latency = 0;
2080 tmp->samples = 0;
2081 latency[rw] /= samples;
2082 if (latency[rw] == 0)
2083 continue;
2084 avg_latency[rw][i].latency = latency[rw];
2085 }
Shaohua Lib9147dd2017-03-27 15:19:42 -07002086 }
2087 }
2088
Joseph Qib889bf62017-11-21 09:38:30 +08002089 for (rw = READ; rw <= WRITE; rw++) {
2090 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2091 if (!avg_latency[rw][i].latency) {
2092 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2093 td->avg_buckets[rw][i].latency =
2094 last_latency[rw];
2095 continue;
2096 }
2097
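/* same 7/8 EWMA as the idle-time tracking: new = (7 * old + sample) / 8 */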
2098 if (!td->avg_buckets[rw][i].valid)
2099 latency[rw] = avg_latency[rw][i].latency;
2100 else
2101 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2102 avg_latency[rw][i].latency) >> 3;
2103
2104 td->avg_buckets[rw][i].latency = max(latency[rw],
2105 last_latency[rw]);
2106 td->avg_buckets[rw][i].valid = true;
2107 last_latency[rw] = td->avg_buckets[rw][i].latency;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002108 }
Shaohua Lib9147dd2017-03-27 15:19:42 -07002109 }
Shaohua Li4cff7292017-05-17 13:07:25 -07002110
2111 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2112 throtl_log(&td->service_queue,
Joseph Qib889bf62017-11-21 09:38:30 +08002113 "Latency bucket %d: read latency=%ld, read valid=%d, "
2114 "write latency=%ld, write valid=%d", i,
2115 td->avg_buckets[READ][i].latency,
2116 td->avg_buckets[READ][i].valid,
2117 td->avg_buckets[WRITE][i].latency,
2118 td->avg_buckets[WRITE][i].valid);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002119}
2120#else
2121static inline void throtl_update_latency_buckets(struct throtl_data *td)
2122{
2123}
2124#endif
2125
Jens Axboe2bc19cd2017-04-20 09:41:36 -06002126static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2127{
2128#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
Dennis Zhou (Facebook)31118852018-08-31 16:22:44 -04002129 /* fallback to root_blkg if we fail to get a blkg ref */
2130 if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
2131 bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002132 bio_issue_init(&bio->bi_issue, bio_sectors(bio));
Jens Axboe2bc19cd2017-04-20 09:41:36 -06002133#endif
2134}
2135
Tejun Heoae118892015-08-18 14:55:20 -07002136bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2137 struct bio *bio)
Vivek Goyale43473b2010-09-15 17:06:35 -04002138{
Tejun Heoc5cc2072013-05-14 13:52:38 -07002139 struct throtl_qnode *qn = NULL;
Tejun Heoae118892015-08-18 14:55:20 -07002140 struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
Tejun Heo73f0d492013-05-14 13:52:35 -07002141 struct throtl_service_queue *sq;
Tejun Heo0e9f4162013-05-14 13:52:35 -07002142 bool rw = bio_data_dir(bio);
Tejun Heobc16a4f2011-10-19 14:33:01 +02002143 bool throttled = false;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002144 struct throtl_data *td = tg->td;
Vivek Goyale43473b2010-09-15 17:06:35 -04002145
Tejun Heoae118892015-08-18 14:55:20 -07002146 WARN_ON_ONCE(!rcu_read_lock_held());
2147
Tejun Heo2a0f61e2013-05-14 13:52:36 -07002148 /* see throtl_charge_bio() */
Christoph Hellwig8d2bbd42016-10-20 15:12:12 +02002149 if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
Tejun Heobc16a4f2011-10-19 14:33:01 +02002150 goto out;
Vivek Goyale43473b2010-09-15 17:06:35 -04002151
2152 spin_lock_irq(q->queue_lock);
Tejun Heoc9589f02015-08-18 14:55:19 -07002153
Shaohua Lib9147dd2017-03-27 15:19:42 -07002154 throtl_update_latency_buckets(td);
2155
Tejun Heoc9589f02015-08-18 14:55:19 -07002156 if (unlikely(blk_queue_bypass(q)))
Tejun Heobc16a4f2011-10-19 14:33:01 +02002157 goto out_unlock;
Vivek Goyalf469a7b2011-05-19 15:38:23 -04002158
Jens Axboe2bc19cd2017-04-20 09:41:36 -06002159 blk_throtl_assoc_bio(tg, bio);
Shaohua Li9e234ee2017-03-27 10:51:41 -07002160 blk_throtl_update_idletime(tg);
2161
Tejun Heo73f0d492013-05-14 13:52:35 -07002162 sq = &tg->service_queue;
2163
Shaohua Lic79892c2017-03-27 10:51:34 -07002164again:
Tejun Heo9e660ac2013-05-14 13:52:38 -07002165 while (true) {
Shaohua Li3f0abd82017-03-27 10:51:35 -07002166 if (tg->last_low_overflow_time[rw] == 0)
2167 tg->last_low_overflow_time[rw] = jiffies;
2168 throtl_downgrade_check(tg);
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07002169 throtl_upgrade_check(tg);
Tejun Heo9e660ac2013-05-14 13:52:38 -07002170 /* throtl is FIFO - if bios are already queued, should queue */
2171 if (sq->nr_queued[rw])
2172 break;
Vivek Goyalde701c72011-03-07 21:09:32 +01002173
Tejun Heo9e660ac2013-05-14 13:52:38 -07002174 /* if above limits, break to queue */
Shaohua Lic79892c2017-03-27 10:51:34 -07002175 if (!tg_may_dispatch(tg, bio, NULL)) {
Shaohua Li3f0abd82017-03-27 10:51:35 -07002176 tg->last_low_overflow_time[rw] = jiffies;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002177 if (throtl_can_upgrade(td, tg)) {
2178 throtl_upgrade_state(td);
Shaohua Lic79892c2017-03-27 10:51:34 -07002179 goto again;
2180 }
Tejun Heo9e660ac2013-05-14 13:52:38 -07002181 break;
Shaohua Lic79892c2017-03-27 10:51:34 -07002182 }
Tejun Heo9e660ac2013-05-14 13:52:38 -07002183
2184 /* within limits, let's charge and dispatch directly */
Vivek Goyale43473b2010-09-15 17:06:35 -04002185 throtl_charge_bio(tg, bio);
Vivek Goyal04521db2011-03-22 21:54:29 +01002186
2187 /*
2188 * We need to trim slice even when bios are not being queued
2189 * otherwise it might happen that a bio is not queued for
2190 * a long time and slice keeps on extending and trim is not
2191 * called for a long time. Now if limits are reduced suddenly
2192 * we take into account all the IO dispatched so far at the new
2193 * low rate and newly queued IO gets a really long dispatch
2194 * time.
2195 *
2196 * So keep on trimming slice even if bio is not queued.
2197 */
Tejun Heo0f3457f2013-05-14 13:52:32 -07002198 throtl_trim_slice(tg, rw);
Tejun Heo9e660ac2013-05-14 13:52:38 -07002199
2200 /*
2201 * @bio passed through this layer without being throttled.
2202 * Climb up the ladder. If we're already at the top, it
2203 * can be executed directly.
2204 */
Tejun Heoc5cc2072013-05-14 13:52:38 -07002205 qn = &tg->qnode_on_parent[rw];
Tejun Heo9e660ac2013-05-14 13:52:38 -07002206 sq = sq->parent_sq;
2207 tg = sq_to_tg(sq);
2208 if (!tg)
2209 goto out_unlock;
Vivek Goyale43473b2010-09-15 17:06:35 -04002210 }
2211
Tejun Heo9e660ac2013-05-14 13:52:38 -07002212 /* out-of-limit, queue to @tg */
Tejun Heofda6f272013-05-14 13:52:36 -07002213 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2214 rw == READ ? 'R' : 'W',
Shaohua Li9f626e32017-03-27 10:51:30 -07002215 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2216 tg_bps_limit(tg, rw),
2217 tg->io_disp[rw], tg_iops_limit(tg, rw),
Tejun Heofda6f272013-05-14 13:52:36 -07002218 sq->nr_queued[READ], sq->nr_queued[WRITE]);
Vivek Goyale43473b2010-09-15 17:06:35 -04002219
Shaohua Li3f0abd82017-03-27 10:51:35 -07002220 tg->last_low_overflow_time[rw] = jiffies;
2221
Shaohua Lib9147dd2017-03-27 15:19:42 -07002222 td->nr_queued[rw]++;
Tejun Heoc5cc2072013-05-14 13:52:38 -07002223 throtl_add_bio_tg(bio, qn, tg);
Tejun Heobc16a4f2011-10-19 14:33:01 +02002224 throttled = true;
Vivek Goyale43473b2010-09-15 17:06:35 -04002225
Tejun Heo7f52f982013-05-14 13:52:37 -07002226 /*
2227 * Update @tg's dispatch time and force schedule dispatch if @tg
2228 * was empty before @bio. The forced scheduling isn't likely to
2229 * cause undue delay as @bio is likely to be dispatched directly if
2230 * its @tg's disptime is not in the future.
2231 */
Tejun Heo0e9f4162013-05-14 13:52:35 -07002232 if (tg->flags & THROTL_TG_WAS_EMPTY) {
Tejun Heo77216b02013-05-14 13:52:36 -07002233 tg_update_disptime(tg);
Tejun Heo7f52f982013-05-14 13:52:37 -07002234 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
Vivek Goyale43473b2010-09-15 17:06:35 -04002235 }
2236
Tejun Heobc16a4f2011-10-19 14:33:01 +02002237out_unlock:
Vivek Goyale43473b2010-09-15 17:06:35 -04002238 spin_unlock_irq(q->queue_lock);
Tejun Heobc16a4f2011-10-19 14:33:01 +02002239out:
Shaohua Li111be882017-12-20 11:10:17 -07002240 bio_set_flag(bio, BIO_THROTTLED);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002241
2242#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2243 if (throttled || !td->track_bio_latency)
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002244 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002245#endif
Tejun Heobc16a4f2011-10-19 14:33:01 +02002246 return throttled;
Vivek Goyale43473b2010-09-15 17:06:35 -04002247}
2248
Shaohua Li9e234ee2017-03-27 10:51:41 -07002249#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
Shaohua Lib9147dd2017-03-27 15:19:42 -07002250static void throtl_track_latency(struct throtl_data *td, sector_t size,
2251 int op, unsigned long time)
2252{
2253 struct latency_bucket *latency;
2254 int index;
2255
Joseph Qib889bf62017-11-21 09:38:30 +08002256 if (!td || td->limit_index != LIMIT_LOW ||
2257 !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
Shaohua Lib9147dd2017-03-27 15:19:42 -07002258 !blk_queue_nonrot(td->queue))
2259 return;
2260
2261 index = request_bucket_index(size);
2262
Joseph Qib889bf62017-11-21 09:38:30 +08002263 latency = get_cpu_ptr(td->latency_buckets[op]);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002264 latency[index].total_latency += time;
2265 latency[index].samples++;
Joseph Qib889bf62017-11-21 09:38:30 +08002266 put_cpu_ptr(td->latency_buckets[op]);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002267}
2268
2269void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2270{
2271 struct request_queue *q = rq->q;
2272 struct throtl_data *td = q->td;
2273
Omar Sandoval544ccc8d2018-05-09 02:08:50 -07002274 throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002275}
2276
Shaohua Li9e234ee2017-03-27 10:51:41 -07002277void blk_throtl_bio_endio(struct bio *bio)
2278{
Josef Bacik08e18ea2018-07-03 11:14:50 -04002279 struct blkcg_gq *blkg;
Shaohua Li9e234ee2017-03-27 10:51:41 -07002280 struct throtl_grp *tg;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002281 u64 finish_time_ns;
2282 unsigned long finish_time;
2283 unsigned long start_time;
2284 unsigned long lat;
Joseph Qib889bf62017-11-21 09:38:30 +08002285 int rw = bio_data_dir(bio);
Shaohua Li9e234ee2017-03-27 10:51:41 -07002286
Josef Bacik08e18ea2018-07-03 11:14:50 -04002287 blkg = bio->bi_blkg;
2288 if (!blkg)
Shaohua Li9e234ee2017-03-27 10:51:41 -07002289 return;
Josef Bacik08e18ea2018-07-03 11:14:50 -04002290 tg = blkg_to_tg(blkg);
Shaohua Li9e234ee2017-03-27 10:51:41 -07002291
Shaohua Lib9147dd2017-03-27 15:19:42 -07002292 finish_time_ns = ktime_get_ns();
2293 tg->last_finish_time = finish_time_ns >> 10;
2294
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002295 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2296 finish_time = __bio_issue_time(finish_time_ns) >> 10;
Josef Bacik08e18ea2018-07-03 11:14:50 -04002297 if (!start_time || finish_time <= start_time)
Shaohua Li53696b82017-03-27 15:19:43 -07002298 return;
2299
2300 lat = finish_time - start_time;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002301 /* this is only for bio based driver */
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002302 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2303 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2304 bio_op(bio), lat);
Shaohua Li53696b82017-03-27 15:19:43 -07002305
Shaohua Li6679a902017-06-06 12:40:43 -07002306 if (tg->latency_target && lat >= tg->td->filtered_latency) {
Shaohua Li53696b82017-03-27 15:19:43 -07002307 int bucket;
2308 unsigned int threshold;
2309
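/*
 * A bio is "bad" if its latency exceeds the measured baseline for
 * its size bucket plus the cgroup's configured latency target.
 */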
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002310 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
Joseph Qib889bf62017-11-21 09:38:30 +08002311 threshold = tg->td->avg_buckets[rw][bucket].latency +
Shaohua Li53696b82017-03-27 15:19:43 -07002312 tg->latency_target;
2313 if (lat > threshold)
2314 tg->bad_bio_cnt++;
2315 /*
2316 * Not race free, so we could get a wrong count, which means
2317 * cgroups may be throttled when they should not be
2318 */
2319 tg->bio_cnt++;
2320 }
2321
2322 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2323 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2324 tg->bio_cnt /= 2;
2325 tg->bad_bio_cnt /= 2;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002326 }
Shaohua Li9e234ee2017-03-27 10:51:41 -07002327}
2328#endif
2329
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002330/*
2331 * Dispatch all bios from all children tg's queued on @parent_sq. On
2332 * return, @parent_sq is guaranteed to not have any active children tg's
2333 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2334 */
2335static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2336{
2337 struct throtl_grp *tg;
2338
2339 while ((tg = throtl_rb_first(parent_sq))) {
2340 struct throtl_service_queue *sq = &tg->service_queue;
2341 struct bio *bio;
2342
2343 throtl_dequeue_tg(tg);
2344
Tejun Heoc5cc2072013-05-14 13:52:38 -07002345 while ((bio = throtl_peek_queued(&sq->queued[READ])))
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002346 tg_dispatch_one_bio(tg, bio_data_dir(bio));
Tejun Heoc5cc2072013-05-14 13:52:38 -07002347 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002348 tg_dispatch_one_bio(tg, bio_data_dir(bio));
2349 }
2350}
2351
Tejun Heoc9a929d2011-10-19 14:42:16 +02002352/**
2353 * blk_throtl_drain - drain throttled bios
2354 * @q: request_queue to drain throttled bios for
2355 *
2356 * Dispatch all currently throttled bios on @q through ->make_request_fn().
2357 */
2358void blk_throtl_drain(struct request_queue *q)
2359 __releases(q->queue_lock) __acquires(q->queue_lock)
2360{
2361 struct throtl_data *td = q->td;
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002362 struct blkcg_gq *blkg;
Tejun Heo492eb212013-08-08 20:11:25 -04002363 struct cgroup_subsys_state *pos_css;
Tejun Heoc9a929d2011-10-19 14:42:16 +02002364 struct bio *bio;
Tejun Heo651930b2013-05-14 13:52:35 -07002365 int rw;
Tejun Heoc9a929d2011-10-19 14:42:16 +02002366
Andi Kleen8bcb6c72012-03-30 12:33:28 +02002367 queue_lockdep_assert_held(q);
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002368 rcu_read_lock();
Tejun Heoc9a929d2011-10-19 14:42:16 +02002369
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002370 /*
2371 * Drain each tg while doing post-order walk on the blkg tree, so
2372 * that all bios are propagated to td->service_queue. It'd be
2373 * better to walk service_queue tree directly but blkg walk is
2374 * easier.
2375 */
Tejun Heo492eb212013-08-08 20:11:25 -04002376 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002377 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
Tejun Heo73f0d492013-05-14 13:52:35 -07002378
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002379 /* finally, transfer bios from top-level tg's into the td */
2380 tg_drain_bios(&td->service_queue);
2381
2382 rcu_read_unlock();
Tejun Heoc9a929d2011-10-19 14:42:16 +02002383 spin_unlock_irq(q->queue_lock);
2384
Tejun Heo2a12f0d2013-05-14 13:52:37 -07002385 /* all bios now should be in td->service_queue, issue them */
Tejun Heo651930b2013-05-14 13:52:35 -07002386 for (rw = READ; rw <= WRITE; rw++)
Tejun Heoc5cc2072013-05-14 13:52:38 -07002387 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2388 NULL)))
Tejun Heo651930b2013-05-14 13:52:35 -07002389 generic_make_request(bio);
Tejun Heoc9a929d2011-10-19 14:42:16 +02002390
2391 spin_lock_irq(q->queue_lock);
2392}
2393
Vivek Goyale43473b2010-09-15 17:06:35 -04002394int blk_throtl_init(struct request_queue *q)
2395{
2396 struct throtl_data *td;
Tejun Heoa2b16932012-04-13 13:11:33 -07002397 int ret;
Vivek Goyale43473b2010-09-15 17:06:35 -04002398
2399 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2400 if (!td)
2401 return -ENOMEM;
Joseph Qib889bf62017-11-21 09:38:30 +08002402 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
Shaohua Lib9147dd2017-03-27 15:19:42 -07002403 LATENCY_BUCKET_SIZE, __alignof__(u64));
Joseph Qib889bf62017-11-21 09:38:30 +08002404 if (!td->latency_buckets[READ]) {
2405 kfree(td);
2406 return -ENOMEM;
2407 }
2408 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2409 LATENCY_BUCKET_SIZE, __alignof__(u64));
2410 if (!td->latency_buckets[WRITE]) {
2411 free_percpu(td->latency_buckets[READ]);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002412 kfree(td);
2413 return -ENOMEM;
2414 }
Vivek Goyale43473b2010-09-15 17:06:35 -04002415
Tejun Heo69df0ab2013-05-14 13:52:36 -07002416 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
Tejun Heob2ce2642015-08-18 14:55:13 -07002417 throtl_service_queue_init(&td->service_queue);
Vivek Goyale43473b2010-09-15 17:06:35 -04002418
Tejun Heocd1604f2012-03-05 13:15:06 -08002419 q->td = td;
Vivek Goyal29b12582011-05-19 15:38:24 -04002420 td->queue = q;
Vivek Goyal02977e42010-10-01 14:49:48 +02002421
Shaohua Li9f626e32017-03-27 10:51:30 -07002422 td->limit_valid[LIMIT_MAX] = true;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07002423 td->limit_index = LIMIT_MAX;
Shaohua Li3f0abd82017-03-27 10:51:35 -07002424 td->low_upgrade_time = jiffies;
2425 td->low_downgrade_time = jiffies;
Shaohua Li9e234ee2017-03-27 10:51:41 -07002426
Tejun Heoa2b16932012-04-13 13:11:33 -07002427 /* activate policy */
Tejun Heo3c798392012-04-16 13:57:25 -07002428 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002429 if (ret) {
Joseph Qib889bf62017-11-21 09:38:30 +08002430 free_percpu(td->latency_buckets[READ]);
2431 free_percpu(td->latency_buckets[WRITE]);
Vivek Goyal29b12582011-05-19 15:38:24 -04002432 kfree(td);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002433 }
Tejun Heoa2b16932012-04-13 13:11:33 -07002434 return ret;
Vivek Goyale43473b2010-09-15 17:06:35 -04002435}
2436
2437void blk_throtl_exit(struct request_queue *q)
2438{
Tejun Heoc875f4d2012-03-05 13:15:22 -08002439 BUG_ON(!q->td);
Vivek Goyalda527772011-03-02 19:05:33 -05002440 throtl_shutdown_wq(q);
Tejun Heo3c798392012-04-16 13:57:25 -07002441 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
Joseph Qib889bf62017-11-21 09:38:30 +08002442 free_percpu(q->td->latency_buckets[READ]);
2443 free_percpu(q->td->latency_buckets[WRITE]);
Tejun Heoc9a929d2011-10-19 14:42:16 +02002444 kfree(q->td);
Vivek Goyale43473b2010-09-15 17:06:35 -04002445}
2446
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002447void blk_throtl_register_queue(struct request_queue *q)
2448{
2449 struct throtl_data *td;
Shaohua Li6679a902017-06-06 12:40:43 -07002450 int i;
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002451
2452 td = q->td;
2453 BUG_ON(!td);
2454
Shaohua Li6679a902017-06-06 12:40:43 -07002455 if (blk_queue_nonrot(q)) {
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002456 td->throtl_slice = DFL_THROTL_SLICE_SSD;
Shaohua Li6679a902017-06-06 12:40:43 -07002457 td->filtered_latency = LATENCY_FILTERED_SSD;
2458 } else {
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002459 td->throtl_slice = DFL_THROTL_SLICE_HD;
Shaohua Li6679a902017-06-06 12:40:43 -07002460 td->filtered_latency = LATENCY_FILTERED_HD;
Joseph Qib889bf62017-11-21 09:38:30 +08002461 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2462 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2463 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2464 }
Shaohua Li6679a902017-06-06 12:40:43 -07002465 }
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002466#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2467 /* if no low limit, use previous default */
2468 td->throtl_slice = DFL_THROTL_SLICE_HD;
2469#endif
Shaohua Li9e234ee2017-03-27 10:51:41 -07002470
weiping zhang475a0552018-01-20 07:34:25 +08002471 td->track_bio_latency = !queue_is_rq_based(q);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002472 if (!td->track_bio_latency)
2473 blk_stat_enable_accounting(q);
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002474}
2475
Shaohua Li297e3d82017-03-27 10:51:37 -07002476#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2477ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2478{
2479 if (!q->td)
2480 return -EINVAL;
2481 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2482}
2483
2484ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2485 const char *page, size_t count)
2486{
2487 unsigned long v;
2488 unsigned long t;
2489
2490 if (!q->td)
2491 return -EINVAL;
2492 if (kstrtoul(page, 10, &v))
2493 return -EINVAL;
2494 t = msecs_to_jiffies(v);
2495 if (t == 0 || t > MAX_THROTL_SLICE)
2496 return -EINVAL;
2497 q->td->throtl_slice = t;
2498 return count;
2499}
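/*
 * Illustrative usage, assuming the standard queue sysfs wiring for this
 * attribute:
 *
 *   echo 50 > /sys/block/<dev>/queue/throttle_sample_time
 *
 * sets a 50ms throttling slice; 0 or anything above MAX_THROTL_SLICE
 * (one second) is rejected with -EINVAL.
 */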
2500#endif
2501
Vivek Goyale43473b2010-09-15 17:06:35 -04002502static int __init throtl_init(void)
2503{
Vivek Goyal450adcb2011-03-01 13:40:54 -05002504 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2505 if (!kthrotld_workqueue)
2506 panic("Failed to create kthrotld\n");
2507
Tejun Heo3c798392012-04-16 13:57:25 -07002508 return blkcg_policy_register(&blkcg_policy_throtl);
Vivek Goyale43473b2010-09-15 17:06:35 -04002509}
2510
2511module_init(throtl_init);