/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does: we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at the leaf
 * level.
 *
 * Consider the following hierarchy:
 *
 *                        root blkg
 *                       /         \
 *            fast (target=5ms)   slow (target=10ms)
 *                /     \              /       \
 *               a       b      normal(15ms)  unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does, then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust our own queue depth if needed.
 *
 * There are two ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at UINT_MAX and
 * scales down to 1.  If the group is only ever submitting IO for itself then
 * this is the only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case where a group is
 * generating IO that has to be issued by the root cg to avoid priority
 * inversion, such as REQ_META or REQ_SWAP.  If we are already at qd == 1 and
 * we're getting a lot of work done for us on behalf of the root cg and are
 * being asked to scale down more, then we induce a latency at userspace
 * return.  We accumulate the total amount of time we need to be punished by
 * doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
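
/*
 * To make the punishment math above concrete, here is a rough worked example
 * (the numbers are illustrative only and do not come from the code): with a
 * target of min_lat_nsec = 5ms and root-cg issued IO completing in 2ms, each
 * such completion adds 3ms to total_time.  After roughly 334 of them the
 * accumulated total exceeds NSEC_PER_SEC, so the offending task would see the
 * full one second clamp applied the next time it returns to userspace.
 */
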
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
        atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
        return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
        spinlock_t lock;

        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;

        /* The latency that we missed. */
        u64 scale_lat;

        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;

        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;

        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct percentile_stats {
        u64 total;
        u64 missed;
};

struct latency_stat {
        union {
                struct percentile_stats ps;
                struct blk_rq_stat rqs;
        };
};

struct iolatency_grp {
        struct blkg_policy_data pd;
        struct latency_stat __percpu *stats;
        struct latency_stat cur_stat;
        struct blk_iolatency *blkiolat;
        struct rq_depth rq_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;

        /* total running average of our io latency. */
        u64 lat_avg;

        /* Our current number of IO's for the last summation. */
        u64 nr_samples;

        bool ssd;
        struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80) - 80 samples
        2014, // exp(1/60) - 60 samples
};
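
/*
 * To illustrate the bucketing (an informal example, not part of the original
 * comment): BLKIOLATENCY_EXP_BUCKET_SIZE works out to NSEC_PER_SEC / 4, i.e.
 * 250ms, so a 100ms window lands in bucket 0 (factor 2045, ~600 sample decay)
 * while a full 1 second window lands in bucket 4 (factor 2014, ~60 sample
 * decay).
 */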

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);
        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);
                thresh = max(thresh, 1ULL);
                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}
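
/*
 * As a concrete reading of the check above (illustrative numbers): for an ssd
 * group a 200 sample window is "ok" as long as fewer than 20 of those samples
 * missed the target, i.e. less than 10%.  On rotational storage we instead
 * require the window's mean latency to stay at or below min_lat_nsec.
 */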

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}

static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
                                       wait_queue_entry_t *wait,
                                       bool first_block)
{
        struct rq_wait *rqw = &iolat->rq_wait;

        if (first_block && waitqueue_active(&rqw->wait) &&
            rqw->wait.head.next != &wait->entry)
                return false;
        return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
        DEFINE_WAIT(wait);
        bool first_block = true;

        if (use_delay)
                blkcg_schedule_throttle(rqos->q, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root.  If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        if (iolatency_may_queue(iolat, &wait, first_block))
                return;

        do {
                prepare_to_wait_exclusive(&rqw->wait, &wait,
                                          TASK_UNINTERRUPTIBLE);

                if (iolatency_may_queue(iolat, &wait, first_block))
                        break;
                first_block = false;
                io_schedule();
        } while (1);

        finish_wait(&rqw->wait, &wait);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
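
/*
 * For instance (illustrative numbers only): with nr_requests = 128,
 * scale_amount() returns 128 >> 4 = 8 when scaling up but 128 >> 2 = 32 when
 * scaling down, so the adjustment step taken when unthrottling is much smaller
 * than the step taken when throttling.
 */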

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours
                 * to dig out of it.  Just enough that we don't
                 * throttle/unthrottle with jagged workloads but can still
                 * unthrottle once pressure has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}
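
/*
 * A hypothetical walk-through of the asymmetry handled above (the numbers are
 * made up): with nr_requests = 128 and the cookie sitting at 999900, the hole
 * below DEFAULT_SCALE_COOKIE is 100, which is <= qd, so a scale up event adds
 * the full scale amount (8).  If the hole were 500, diff > qd and we would
 * only creep back up by 1 per event, so a deeply throttled subtree unthrottles
 * gradually.
 */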

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
 * depth when scaling up and halve the current depth when scaling down, so we
 * don't get wild swings and hopefully dial in to a fairer distribution of the
 * overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->rq_depth.max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->rq_depth.max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->rq_depth.max_depth = max(old, 1UL);
        }
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        unsigned int old;
        int direction = 0;

        if (lat_to_blkg(iolat)->parent == NULL)
                return;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

        /* Somebody beat us to the punch, just bail. */
        if (old != our_cookie)
                return;

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did
                 * 5% or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->rq_depth.max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->rq_depth.max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}

static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blk_iolatency_enabled(blkiolat))
                return;

        bio_issue_init(&bio->bi_issue, bio_sectors(bio));

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Truncate 'now' the same way the issue time was truncated so the
         * two timestamps are directly comparable.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bio's in the cgroups latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* Everything is ok and we don't need to adjust the scale. */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
             now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}

static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now = ktime_to_ns(ktime_get());
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;

        blkg = bio->bi_blkg;
        if (!blkg)
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        enabled = blk_iolatency_enabled(iolat->blkiolat);
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                atomic_dec(&rqw->inflight);
                if (!enabled || iolat->min_lat_nsec == 0)
                        goto next;
                iolatency_record_time(iolat, &bio->bi_issue, now,
                                      issue_as_root);
                window_start = atomic64_read(&iolat->window_start);
                if (now > window_start &&
                    (now - window_start) >= iolat->cur_win_nsec) {
                        if (atomic64_cmpxchg(&iolat->window_start,
                                        window_start, now) == window_start)
                                iolatency_check_latencies(iolat, now);
                }
next:
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;

        blkg = bio->bi_blkg;
        while (blkg && blkg->parent) {
                struct rq_wait *rqw;
                struct iolatency_grp *iolat;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                rqw = &iolat->rq_wait;
                atomic_dec(&rqw->inflight);
                wake_up(&rqw->wait);
next:
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        del_timer_sync(&blkiolat->timer);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .cleanup = blkcg_iolatency_cleanup,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};

static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.q->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_try_get(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and
                 * carry on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}

int blk_iolatency_init(struct request_queue *q)
{
        struct blk_iolatency *blkiolat;
        struct rq_qos *rqos;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        rqos = &blkiolat->rqos;
        rqos->id = RQ_QOS_CGROUP;
        rqos->ops = &blkcg_iolatency_ops;
        rqos->q = q;

        rq_qos_add(q, rqos);

        ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
        if (ret) {
                rq_qos_del(q, rqos);
                kfree(blkiolat);
                return ret;
        }

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

        return 0;
}

static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val)
                atomic_inc(&blkiolat->enabled);
        if (oldval && !val)
                atomic_dec(&blkiolat->enabled);
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}

static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
                return ret;

        iolat = blkg_to_lat(ctx.blkg);
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551616 */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Walk up the tree to see if our new val is lower than it should be. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        iolatency_set_min_lat_nsec(blkg, lat_val);
        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }

        ret = 0;
out:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}
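
/*
 * Example usage of the interface parsed above (assuming a device numbered
 * 8:16 and the cgroup2 hierarchy mounted at /sys/fs/cgroup):
 *
 *      echo "8:16 target=500" > io.latency   - set a 500usec latency target
 *      echo "8:16 target=max" > io.latency   - clear the target again
 */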

static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}

static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
                                 size_t size)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
        }
        preempt_enable();

        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
                                 (unsigned long long)stat.ps.missed,
                                 (unsigned long long)stat.ps.total);
        return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
                         (unsigned long long)stat.ps.missed,
                         (unsigned long long)stat.ps.total,
                         iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
                                size_t size)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, buf, size);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
                                 avg_lat, cur_win);

        return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
                         iolat->rq_depth.max_depth, avg_lat, cur_win);
}


static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, node);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                __alignof__(struct latency_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->rq_depth.queue_depth = blkg->q->nr_requests;
        iolat->rq_depth.max_depth = UINT_MAX;
        iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);

        iolatency_set_min_lat_nsec(blkg, 0);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        free_percpu(iolat->stats);
        kfree(iolat);
}

static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes = iolatency_files,
        .pd_alloc_fn = iolatency_pd_alloc,
        .pd_init_fn = iolatency_pd_init,
        .pd_offline_fn = iolatency_pd_offline,
        .pd_free_fn = iolatency_pd_free,
        .pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);