/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-wbt.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;
static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
static u64 cfq_group_idle = NSEC_PER_SEC / 125;
static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
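/*
 * For reference, the defaults above work out to: fifo expiry 250ms/125ms,
 * slice_sync 100ms, slice_async 40ms, slice_idle and group_idle 8ms, and
 * a 300ms target latency.  These only seed the per-device copies in
 * struct cfq_data below, which are what the scheduler actually consults.
 */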

/*
 * offset from end of queue service tree for idle class
 */
#define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
/* offset from end of group service tree under time slice mode */
#define CFQ_SLICE_MODE_GROUP_DELAY	(NSEC_PER_SEC / 5)
/* offset from end of group service tree under IOPS mode */
#define CFQ_IOPS_MODE_GROUP_DELAY	(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2 * NSEC_PER_SEC / HZ)
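/* For reference: two scheduler ticks in ns, i.e. 8ms at HZ=250 or 2ms at HZ=1000. */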

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
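/*
 * A worked reading of CFQQ_SEEKY(): seek_history is a 32-bit shift
 * register holding one seeky/non-seeky bit per recent request (it is
 * updated where request positions are examined, later in this file), so
 * a queue counts as seeky once more than 32/8 = 4 of its last 32
 * requests were seeks.
 */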

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_LEGACY_MIN	10
#define CFQ_WEIGHT_LEGACY_DFL	500
#define CFQ_WEIGHT_LEGACY_MAX	1000

struct cfq_ttime {
	u64 last_end_request;

	u64 ttime_total;
	u64 ttime_mean;
	unsigned long ttime_samples;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = ktime_get_ns(),},}
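/*
 * CFQ_RB_ROOT is a compound literal, so a whole service tree can be
 * reinitialized by plain assignment, e.g. "*st = CFQ_RB_ROOT;" (as the
 * group setup code later in this file does); note that it also stamps
 * ->ttime.last_end_request with the current time.
 */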

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	u64 rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	u64 dispatch_start;
	u64 allocated_slice;
	u64 slice_dispatch;
	/* time when first request from queue completed and slice started. */
	u64 slice_start;
	u64 slice_end;
	s64 slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data cpd;

	unsigned int weight;
	unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg.  This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active.  An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to.  This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1.  The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
	 * this cfqg against the child cfqgs.  For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	u64 saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */

	/* async queue for each priority case */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	u64 workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct hrtimer idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_latency;
	u64 cfq_fifo_expire[2];
	u64 cfq_slice[2];
	u64 cfq_slice_idle;
	u64 cfq_group_idle;
	u64 cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	u64 last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}
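/*
 * Example mapping: st_for(cfqg, BE_WORKLOAD, SYNC_WORKLOAD) returns
 * &cfqg->service_trees[0][2], while any IDLE_WORKLOAD lookup collapses to
 * the single &cfqg->service_tree_idle regardless of @type.
 */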

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
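/*
 * For illustration, CFQ_CFQQ_FNS(on_rr) above expands to three inline
 * helpers:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);	sets CFQ_CFQQ_FLAG_on_rr
 *	cfq_clear_cfqq_on_rr(cfqq);	clears it
 *	cfq_cfqq_on_rr(cfqq)		tests it
 */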

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS
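/*
 * Likewise, CFQG_FLAG_FNS(waiting) generates cfqg_stats_mark_waiting(),
 * cfqg_stats_clear_waiting() and the cfqg_stats_waiting() predicate used
 * by the stat helpers below.
 */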

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static struct cfq_group_data
*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
{
	return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
				    cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)
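/*
 * Sample blktrace output from cfq_log_cfqq() above (hypothetical pid,
 * cgroup path and message):
 *
 *	cfq4923S  /mygrp set_slice=100000000
 *
 * i.e. "cfq<pid>" plus 'S'ync/'A'sync and 'N' for sync-noidle queues,
 * then the blkg path and the caller-supplied format.
 */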

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg,
					    unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			uint64_t time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
					       unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.queued, op, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
					       unsigned int op)
{
	blkg_rwstat_add(&cfqg->stats.merged, op, 1);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time,
			unsigned int op)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
	cfqg_stats_reset(&cfqg->stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
				      struct cfq_group *ancestor)
{
	return true;
}
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
				##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, unsigned int op) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			uint64_t time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
			unsigned int op) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
			unsigned int op) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time,
			unsigned int op) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	u64 slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}
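/*
 * Example with the defaults: once a context has more than 80 thinktime
 * samples (sample_valid), a mean thinktime above the 8ms slice_idle marks
 * it "big", and callers take that as a hint that idling for its next
 * request is not worthwhile.
 */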

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(&cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get full sync slice length.
 */
static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	u64 base_slice = cfqd->cfq_slice[sync];
	u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (slice * (4 - prio));
}
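/*
 * Worked example with the default 100ms sync slice: the per-priority step
 * is 100ms / CFQ_SLICE_SCALE = 20ms, so ioprio 0 gets 180ms, the default
 * ioprio 4 gets exactly the base 100ms, and ioprio 7 gets 40ms.
 */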

static inline u64
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(u64 charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	return div_u64(c, vfraction);
}
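/*
 * Worked example: a group entitled to half the device has
 * vfraction = 1 << 11 (CFQ_SERVICE_SHIFT is 12).  Charging 1000ns of disk
 * time yields (1000 << 24) / (1 << 11) = 2000 << 12, i.e. 2000ns in fixed
 * point: the smaller the entitlement, the faster vdisktime advances.
 */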

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
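/*
 * The signed-delta comparison keeps these helpers correct across u64
 * wraparound, the same trick CFS uses for vruntime.  For example, with
 * min_vdisktime = ULLONG_MAX - 10 and vdisktime = 5 the subtraction
 * yields (s64)16 > 0, so the wrapped vdisktime is correctly treated as
 * the later value even though it is numerically smaller.
 */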

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follow sudden increases and decrease slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
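/*
 * Worked example with cfq_hist_divisor = 4 (mult = 3, round = 2): if the
 * stored average is 2 and busy jumps to 6, the new value is
 * (3*6 + 2 + 2) / 4 = 5, tracking the spike almost immediately; if busy
 * then drops to 0, it decays one step at a time, 5 -> 4 -> 3 -> 2.
 */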

static inline u64
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline u64
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	u64 slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		u64 sync_slice = cfqd->cfq_slice[1];
		u64 expect_latency = sync_slice * iq;
		u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
			u64 low_slice;

			/* scale low_slice according to IO priority
			 * and sync vs async */
			low_slice = div64_u64(base_low_slice*slice, sync_slice);
			low_slice = min(slice, low_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = div64_u64(slice*group_slice, expect_latency);
			slice = max(slice, low_slice);
		}
	}
	return slice;
}
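/*
 * Worked example with the defaults: four interested sync queues give
 * expect_latency = 4 * 100ms = 400ms.  A lone group has
 * group_slice = 300ms (the full target latency), so each slice is
 * compressed to 100ms * 300/400 = 75ms, never dropping below low_slice
 * (16ms for a full-weight sync queue, from 2 * cfq_slice_idle).
 */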
1063
1064static inline void
1065cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1066{
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001067 u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1068 u64 now = ktime_get_ns();
Shaohua Lic553f8e2011-01-14 08:41:03 +01001069
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001070 cfqq->slice_start = now;
1071 cfqq->slice_end = now + slice;
Vivek Goyalf75edf22009-12-03 12:59:53 -05001072 cfqq->allocated_slice = slice;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001073 cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
Jens Axboe44f7c162007-01-19 11:51:58 +11001074}
1075
1076/*
1077 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1078 * isn't valid until the first request from the dispatch is activated
1079 * and the slice time set.
1080 */
Jens Axboea6151c32009-10-07 20:02:57 +02001081static inline bool cfq_slice_used(struct cfq_queue *cfqq)
Jens Axboe44f7c162007-01-19 11:51:58 +11001082{
1083 if (cfq_cfqq_slice_new(cfqq))
Shaohua Lic1e44752010-11-08 15:01:02 +01001084 return false;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001085 if (ktime_get_ns() < cfqq->slice_end)
Shaohua Lic1e44752010-11-08 15:01:02 +01001086 return false;
Jens Axboe44f7c162007-01-19 11:51:58 +11001087
Shaohua Lic1e44752010-11-08 15:01:02 +01001088 return true;
Jens Axboe44f7c162007-01-19 11:51:58 +11001089}
1090
1091/*
Jens Axboe5e705372006-07-13 12:39:25 +02001092 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 * We choose the request that is closest to the head right now. Distance
Andreas Mohre8a99052006-03-28 08:59:49 +02001094 * behind the head is penalized and only allowed to a certain extent.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 */
Jens Axboe5e705372006-07-13 12:39:25 +02001096static struct request *
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001097cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098{
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001099 sector_t s1, s2, d1 = 0, d2 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001100 unsigned long back_max;
Andreas Mohre8a99052006-03-28 08:59:49 +02001101#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
1102#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
1103 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104
Jens Axboe5e705372006-07-13 12:39:25 +02001105 if (rq1 == NULL || rq1 == rq2)
1106 return rq2;
1107 if (rq2 == NULL)
1108 return rq1;
Jens Axboe9c2c38a2005-08-24 14:57:54 +02001109
Namhyung Kim229836b2011-05-24 10:23:21 +02001110 if (rq_is_sync(rq1) != rq_is_sync(rq2))
1111 return rq_is_sync(rq1) ? rq1 : rq2;
1112
Christoph Hellwig65299a32011-08-23 14:50:29 +02001113 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1114 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
Jens Axboeb53d1ed2011-08-19 08:34:48 +02001115
Tejun Heo83096eb2009-05-07 22:24:39 +09001116 s1 = blk_rq_pos(rq1);
1117 s2 = blk_rq_pos(rq2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 /*
1120 * by definition, 1KiB is 2 sectors
1121 */
1122 back_max = cfqd->cfq_back_max * 2;
1123
1124 /*
1125 * Strict one way elevator _except_ in the case where we allow
1126 * short backward seeks which are biased as twice the cost of a
1127 * similar forward seek.
1128 */
1129 if (s1 >= last)
1130 d1 = s1 - last;
1131 else if (s1 + back_max >= last)
1132 d1 = (last - s1) * cfqd->cfq_back_penalty;
1133 else
Andreas Mohre8a99052006-03-28 08:59:49 +02001134 wrap |= CFQ_RQ1_WRAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135
1136 if (s2 >= last)
1137 d2 = s2 - last;
1138 else if (s2 + back_max >= last)
1139 d2 = (last - s2) * cfqd->cfq_back_penalty;
1140 else
Andreas Mohre8a99052006-03-28 08:59:49 +02001141 wrap |= CFQ_RQ2_WRAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142
1143 /* Found required data */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
Andreas Mohre8a99052006-03-28 08:59:49 +02001145 /*
1146 * By doing switch() on the bit mask "wrap" we avoid having to
1147 * check two variables for all permutations: --> faster!
1148 */
1149 switch (wrap) {
Jens Axboe5e705372006-07-13 12:39:25 +02001150 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
Andreas Mohre8a99052006-03-28 08:59:49 +02001151 if (d1 < d2)
Jens Axboe5e705372006-07-13 12:39:25 +02001152 return rq1;
Andreas Mohre8a99052006-03-28 08:59:49 +02001153 else if (d2 < d1)
Jens Axboe5e705372006-07-13 12:39:25 +02001154 return rq2;
Andreas Mohre8a99052006-03-28 08:59:49 +02001155 else {
1156 if (s1 >= s2)
Jens Axboe5e705372006-07-13 12:39:25 +02001157 return rq1;
Andreas Mohre8a99052006-03-28 08:59:49 +02001158 else
Jens Axboe5e705372006-07-13 12:39:25 +02001159 return rq2;
Andreas Mohre8a99052006-03-28 08:59:49 +02001160 }
1161
1162 case CFQ_RQ2_WRAP:
Jens Axboe5e705372006-07-13 12:39:25 +02001163 return rq1;
Andreas Mohre8a99052006-03-28 08:59:49 +02001164 case CFQ_RQ1_WRAP:
Jens Axboe5e705372006-07-13 12:39:25 +02001165 return rq2;
1166 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
Andreas Mohre8a99052006-03-28 08:59:49 +02001167 default:
1168 /*
1169 * Since both rqs are wrapped,
1170 * start with the one that's further behind head
1171 * (--> only *one* back seek required),
1172 * since back seek takes more time than forward.
1173 */
1174 if (s1 <= s2)
Jens Axboe5e705372006-07-13 12:39:25 +02001175 return rq1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 else
Jens Axboe5e705372006-07-13 12:39:25 +02001177 return rq2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 }
1179}
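
/*
 * Worked example for the distance logic above (illustrative numbers):
 * with last = 1000, the default cfq_back_max of 16384KiB gives
 * back_max = 32768 sectors and cfq_back_penalty = 2.  A request at
 * s1 = 1200 has d1 = 200; one at s2 = 900 has d2 = (1000 - 900) * 2
 * = 200.  The tie goes to the higher sector (rq1), keeping the head
 * moving forward.  A request more than back_max sectors behind the
 * head counts as wrapped and loses to any non-wrapped request.
 */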
1180
Jens Axboe498d3aa22007-04-26 12:54:48 +02001181/*
1182 * Below are the leftmost-node cache additions for the rbtree
1183 */
Jens Axboe08717142008-01-28 11:38:15 +01001184static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
Jens Axboecc09e292007-04-26 12:53:50 +02001185{
Vivek Goyal615f0252009-12-03 12:59:39 -05001186 /* Service tree is empty */
1187 if (!root->count)
1188 return NULL;
1189
Jens Axboecc09e292007-04-26 12:53:50 +02001190 if (!root->left)
1191 root->left = rb_first(&root->rb);
1192
Jens Axboe08717142008-01-28 11:38:15 +01001193 if (root->left)
1194 return rb_entry(root->left, struct cfq_queue, rb_node);
1195
1196 return NULL;
Jens Axboecc09e292007-04-26 12:53:50 +02001197}
1198
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001199static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1200{
1201 if (!root->left)
1202 root->left = rb_first(&root->rb);
1203
1204 if (root->left)
1205 return rb_entry_cfqg(root->left);
1206
1207 return NULL;
1208}
1209
Jens Axboea36e71f2009-04-15 12:15:11 +02001210static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1211{
1212 rb_erase(n, root);
1213 RB_CLEAR_NODE(n);
1214}
1215
Jens Axboecc09e292007-04-26 12:53:50 +02001216static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1217{
1218 if (root->left == n)
1219 root->left = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02001220 rb_erase_init(n, &root->rb);
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01001221 --root->count;
Jens Axboecc09e292007-04-26 12:53:50 +02001222}
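
/*
 * A minimal sketch of the leftmost-cache contract (illustrative, not
 * from the original source):
 *
 *	cfq_rb_erase(n, st);      (drops the cache if n was leftmost)
 *	cfqq = cfq_rb_first(st);  (lazily recomputes via rb_first())
 *
 * The cache only ever holds the tree's current leftmost node or NULL,
 * so the O(log n) rb_first() walk is paid at most once per erase.
 */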
1223
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224/*
1225 * would be nice to take fifo expire time into account as well
1226 */
Jens Axboe5e705372006-07-13 12:39:25 +02001227static struct request *
1228cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1229 struct request *last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230{
Jens Axboe21183b02006-07-13 12:33:14 +02001231 struct rb_node *rbnext = rb_next(&last->rb_node);
1232 struct rb_node *rbprev = rb_prev(&last->rb_node);
Jens Axboe5e705372006-07-13 12:39:25 +02001233 struct request *next = NULL, *prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
Jens Axboe21183b02006-07-13 12:33:14 +02001235 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236
1237 if (rbprev)
Jens Axboe5e705372006-07-13 12:39:25 +02001238 prev = rb_entry_rq(rbprev);
Jens Axboe21183b02006-07-13 12:33:14 +02001239
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 if (rbnext)
Jens Axboe5e705372006-07-13 12:39:25 +02001241 next = rb_entry_rq(rbnext);
Jens Axboe21183b02006-07-13 12:33:14 +02001242 else {
1243 rbnext = rb_first(&cfqq->sort_list);
1244 if (rbnext && rbnext != &last->rb_node)
Jens Axboe5e705372006-07-13 12:39:25 +02001245 next = rb_entry_rq(rbnext);
Jens Axboe21183b02006-07-13 12:33:14 +02001246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001248 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249}
1250
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001251static u64 cfq_slice_offset(struct cfq_data *cfqd,
1252 struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253{
Jens Axboed9e76202007-04-20 14:27:50 +02001254 /*
1255 * just an approximation, should be ok.
1256 */
Vivek Goyalcdb16e82009-12-03 12:59:38 -05001257 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
Jens Axboe464191c2009-11-30 09:38:13 +01001258 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
Jens Axboed9e76202007-04-20 14:27:50 +02001259}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001261static inline s64
1262cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1263{
1264 return cfqg->vdisktime - st->min_vdisktime;
1265}
1266
1267static void
1268__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1269{
1270 struct rb_node **node = &st->rb.rb_node;
1271 struct rb_node *parent = NULL;
1272 struct cfq_group *__cfqg;
1273 s64 key = cfqg_key(st, cfqg);
1274 int left = 1;
1275
1276 while (*node != NULL) {
1277 parent = *node;
1278 __cfqg = rb_entry_cfqg(parent);
1279
1280 if (key < cfqg_key(st, __cfqg))
1281 node = &parent->rb_left;
1282 else {
1283 node = &parent->rb_right;
1284 left = 0;
1285 }
1286 }
1287
1288 if (left)
1289 st->left = &cfqg->rb_node;
1290
1291 rb_link_node(&cfqg->rb_node, parent, node);
1292 rb_insert_color(&cfqg->rb_node, &st->rb);
1293}
1294
Toshiaki Makita7b5af5c2014-08-28 17:14:58 +09001295/*
1296 * This must be called only on activation of a cfqg
1297 */
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001298static void
Justin TerAvest8184f932011-03-17 16:12:36 +01001299cfq_update_group_weight(struct cfq_group *cfqg)
1300{
Tejun Heo3381cb82012-04-01 14:38:44 -07001301 if (cfqg->new_weight) {
Justin TerAvest8184f932011-03-17 16:12:36 +01001302 cfqg->weight = cfqg->new_weight;
Tejun Heo3381cb82012-04-01 14:38:44 -07001303 cfqg->new_weight = 0;
Justin TerAvest8184f932011-03-17 16:12:36 +01001304 }
Toshiaki Makitae15693e2014-08-26 20:56:36 +09001305}
1306
1307static void
1308cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1309{
1310 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
Tejun Heoe71357e2013-01-09 08:05:10 -08001311
1312 if (cfqg->new_leaf_weight) {
1313 cfqg->leaf_weight = cfqg->new_leaf_weight;
1314 cfqg->new_leaf_weight = 0;
1315 }
Justin TerAvest8184f932011-03-17 16:12:36 +01001316}
1317
1318static void
1319cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1320{
Tejun Heo1d3650f2013-01-09 08:05:11 -08001321 unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
Tejun Heo7918ffb2013-01-09 08:05:11 -08001322 struct cfq_group *pos = cfqg;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001323 struct cfq_group *parent;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001324 bool propagate;
1325
1326 /* add to the service tree */
Justin TerAvest8184f932011-03-17 16:12:36 +01001327 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1328
Toshiaki Makita7b5af5c2014-08-28 17:14:58 +09001329 /*
1330 * Update leaf_weight. We cannot update weight at this point
1331 * because cfqg might already have been activated and is
1332	 * contributing its current weight to the parent's children_weight.
1333 */
Toshiaki Makitae15693e2014-08-26 20:56:36 +09001334 cfq_update_group_leaf_weight(cfqg);
Justin TerAvest8184f932011-03-17 16:12:36 +01001335 __cfq_group_service_tree_add(st, cfqg);
Tejun Heo7918ffb2013-01-09 08:05:11 -08001336
1337 /*
Tejun Heo1d3650f2013-01-09 08:05:11 -08001338 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1339 * entitled to. vfraction is calculated by walking the tree
1340 * towards the root calculating the fraction it has at each level.
1341 * The compounded ratio is how much vfraction @cfqg owns.
1342 *
1343	 * Start with the proportion the tasks in this cfqg have against active
1344 * children cfqgs - its leaf_weight against children_weight.
Tejun Heo7918ffb2013-01-09 08:05:11 -08001345 */
1346 propagate = !pos->nr_active++;
1347 pos->children_weight += pos->leaf_weight;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001348 vfr = vfr * pos->leaf_weight / pos->children_weight;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001349
Tejun Heo1d3650f2013-01-09 08:05:11 -08001350 /*
1351 * Compound ->weight walking up the tree. Both activation and
1352 * vfraction calculation are done in the same loop. Propagation
1353 * stops once an already activated node is met. vfraction
1354 * calculation should always continue to the root.
1355 */
Tejun Heod02f7aa2013-01-09 08:05:11 -08001356 while ((parent = cfqg_parent(pos))) {
Tejun Heo1d3650f2013-01-09 08:05:11 -08001357 if (propagate) {
Toshiaki Makitae15693e2014-08-26 20:56:36 +09001358 cfq_update_group_weight(pos);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001359 propagate = !parent->nr_active++;
1360 parent->children_weight += pos->weight;
1361 }
1362 vfr = vfr * pos->weight / parent->children_weight;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001363 pos = parent;
1364 }
Tejun Heo1d3650f2013-01-09 08:05:11 -08001365
1366 cfqg->vfraction = max_t(unsigned, vfr, 1);
Justin TerAvest8184f932011-03-17 16:12:36 +01001367}
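
/*
 * Worked vfraction example for the loop above (illustrative numbers):
 * a cfqg with leaf_weight 500 at a level whose children_weight totals
 * 1000 starts with vfr = 1/2.  If its parent contributes weight 300 to
 * a children_weight of 600, vfr compounds to 1/2 * 1/2 = 1/4 of the
 * device, stored in fixed point as (1 << CFQ_SERVICE_SHIFT) / 4.
 */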
1368
Hou Tao5be6b752017-03-01 09:02:33 +08001369static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
1370{
1371 if (!iops_mode(cfqd))
1372 return CFQ_SLICE_MODE_GROUP_DELAY;
1373 else
1374 return CFQ_IOPS_MODE_GROUP_DELAY;
1375}
1376
Justin TerAvest8184f932011-03-17 16:12:36 +01001377static void
1378cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001379{
1380 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1381 struct cfq_group *__cfqg;
1382 struct rb_node *n;
1383
1384 cfqg->nr_cfqq++;
Gui Jianfeng760701b2010-11-30 20:52:47 +01001385 if (!RB_EMPTY_NODE(&cfqg->rb_node))
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001386 return;
1387
1388 /*
1389 * Currently put the group at the end. Later implement something
1390	 * so that groups get a lesser vtime based on their weights, so that
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001391	 * a group does not lose everything if it was not continuously backlogged.
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001392 */
1393 n = rb_last(&st->rb);
1394 if (n) {
1395 __cfqg = rb_entry_cfqg(n);
Hou Tao5be6b752017-03-01 09:02:33 +08001396 cfqg->vdisktime = __cfqg->vdisktime +
1397 cfq_get_cfqg_vdisktime_delay(cfqd);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001398 } else
1399 cfqg->vdisktime = st->min_vdisktime;
Justin TerAvest8184f932011-03-17 16:12:36 +01001400 cfq_group_service_tree_add(st, cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001401}
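
/*
 * Placement example (a sketch with assumed numbers): if the last group
 * on the service tree has vdisktime 10ms and the device is in
 * time-slice mode, a newly backlogged group is keyed at
 * 10ms + CFQ_SLICE_MODE_GROUP_DELAY (200ms), so a group that went idle
 * re-enters behind the currently active groups instead of preempting
 * them.
 */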
1402
1403static void
Justin TerAvest8184f932011-03-17 16:12:36 +01001404cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1405{
Tejun Heo7918ffb2013-01-09 08:05:11 -08001406 struct cfq_group *pos = cfqg;
1407 bool propagate;
1408
1409 /*
1410 * Undo activation from cfq_group_service_tree_add(). Deactivate
1411 * @cfqg and propagate deactivation upwards.
1412 */
1413 propagate = !--pos->nr_active;
1414 pos->children_weight -= pos->leaf_weight;
1415
1416 while (propagate) {
Tejun Heod02f7aa2013-01-09 08:05:11 -08001417 struct cfq_group *parent = cfqg_parent(pos);
Tejun Heo7918ffb2013-01-09 08:05:11 -08001418
1419 /* @pos has 0 nr_active at this point */
1420 WARN_ON_ONCE(pos->children_weight);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001421 pos->vfraction = 0;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001422
1423 if (!parent)
1424 break;
1425
1426 propagate = !--parent->nr_active;
1427 parent->children_weight -= pos->weight;
1428 pos = parent;
1429 }
1430
1431 /* remove from the service tree */
Justin TerAvest8184f932011-03-17 16:12:36 +01001432 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1433 cfq_rb_erase(&cfqg->rb_node, st);
1434}
1435
1436static void
1437cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001438{
1439 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1440
1441 BUG_ON(cfqg->nr_cfqq < 1);
1442 cfqg->nr_cfqq--;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05001443
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001444 /* If there are other cfq queues under this group, don't delete it */
1445 if (cfqg->nr_cfqq)
1446 return;
1447
Vivek Goyal2868ef72009-12-03 12:59:48 -05001448 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
Justin TerAvest8184f932011-03-17 16:12:36 +01001449 cfq_group_service_tree_del(st, cfqg);
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001450 cfqg->saved_wl_slice = 0;
Tejun Heo155fead2012-04-01 14:38:44 -07001451 cfqg_stats_update_dequeue(cfqg);
Vivek Goyaldae739e2009-12-03 12:59:45 -05001452}
1453
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001454static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1455 u64 *unaccounted_time)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001456{
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001457 u64 slice_used;
1458 u64 now = ktime_get_ns();
Vivek Goyaldae739e2009-12-03 12:59:45 -05001459
1460 /*
1461 * Queue got expired before even a single request completed or
1462	 * got expired immediately after the first request completed.
1463 */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001464 if (!cfqq->slice_start || cfqq->slice_start == now) {
Vivek Goyaldae739e2009-12-03 12:59:45 -05001465 /*
1466 * Also charge the seek time incurred to the group, otherwise
1467		 * if there are multiple queues in the group, each can dispatch
1468		 * a single request on seeky media and cause lots of seek time
1469		 * and the group will never be charged for it.
1470 */
Jan Kara0b31c102016-06-28 09:04:02 +02001471 slice_used = max_t(u64, (now - cfqq->dispatch_start),
1472 jiffies_to_nsecs(1));
Vivek Goyaldae739e2009-12-03 12:59:45 -05001473 } else {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001474 slice_used = now - cfqq->slice_start;
Justin TerAvest167400d2011-03-12 16:54:00 +01001475 if (slice_used > cfqq->allocated_slice) {
1476 *unaccounted_time = slice_used - cfqq->allocated_slice;
Vivek Goyalf75edf22009-12-03 12:59:53 -05001477 slice_used = cfqq->allocated_slice;
Justin TerAvest167400d2011-03-12 16:54:00 +01001478 }
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001479 if (cfqq->slice_start > cfqq->dispatch_start)
Justin TerAvest167400d2011-03-12 16:54:00 +01001480 *unaccounted_time += cfqq->slice_start -
1481 cfqq->dispatch_start;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001482 }
1483
Vivek Goyaldae739e2009-12-03 12:59:45 -05001484 return slice_used;
1485}
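
/*
 * Accounting example for the function above (illustrative numbers): a
 * queue that was allocated a 100ms slice but ran 120ms is charged
 * slice_used = 100ms with *unaccounted_time = 20ms.  A queue expired
 * before its first completion is charged
 * max(now - dispatch_start, 1 jiffy), so seeky media cannot hide its
 * cost from the group.
 */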
1486
1487static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
Vivek Goyale5ff0822010-04-26 19:25:11 +02001488 struct cfq_queue *cfqq)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001489{
1490 struct cfq_rb_root *st = &cfqd->grp_service_tree;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001491 u64 used_sl, charge, unaccounted_sl = 0;
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001492 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1493 - cfqg->service_tree_idle.count;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001494 unsigned int vfr;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001495 u64 now = ktime_get_ns();
Vivek Goyaldae739e2009-12-03 12:59:45 -05001496
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001497 BUG_ON(nr_sync < 0);
Justin TerAvest167400d2011-03-12 16:54:00 +01001498 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001499
Vivek Goyal02b35082010-08-23 12:23:53 +02001500 if (iops_mode(cfqd))
1501 charge = cfqq->slice_dispatch;
1502 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1503 charge = cfqq->allocated_slice;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001504
Tejun Heo1d3650f2013-01-09 08:05:11 -08001505 /*
1506 * Can't update vdisktime while on service tree and cfqg->vfraction
1507 * is valid only while on it. Cache vfr, leave the service tree,
1508 * update vdisktime and go back on. The re-addition to the tree
1509 * will also update the weights as necessary.
1510 */
1511 vfr = cfqg->vfraction;
Justin TerAvest8184f932011-03-17 16:12:36 +01001512 cfq_group_service_tree_del(st, cfqg);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001513 cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
Justin TerAvest8184f932011-03-17 16:12:36 +01001514 cfq_group_service_tree_add(st, cfqg);
Vivek Goyaldae739e2009-12-03 12:59:45 -05001515
1516 /* This group is being expired. Save the context */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001517 if (cfqd->workload_expires > now) {
1518 cfqg->saved_wl_slice = cfqd->workload_expires - now;
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001519 cfqg->saved_wl_type = cfqd->serving_wl_type;
1520 cfqg->saved_wl_class = cfqd->serving_wl_class;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001521 } else
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001522 cfqg->saved_wl_slice = 0;
Vivek Goyal2868ef72009-12-03 12:59:48 -05001523
1524 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1525 st->min_vdisktime);
Joe Perchesfd16d262011-06-13 10:42:49 +02001526 cfq_log_cfqq(cfqq->cfqd, cfqq,
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001527 "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
Joe Perchesfd16d262011-06-13 10:42:49 +02001528 used_sl, cfqq->slice_dispatch, charge,
1529 iops_mode(cfqd), cfqq->nr_sectors);
Tejun Heo155fead2012-04-01 14:38:44 -07001530 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1531 cfqg_stats_set_start_empty_time(cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001532}
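
/*
 * Charge selection sketch for cfq_group_served() (assumed numbers): in
 * iops_mode() a queue that dispatched 8 requests is charged 8 units;
 * otherwise an async queue in a group with no busy sync queues is
 * charged its full allocated_slice, and everything else the measured
 * slice usage.  The charge is scaled up by 1/vfraction before
 * advancing vdisktime, so a group owning 1/4 of the device ages four
 * times as fast per unit of service.
 */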
1533
Tejun Heof51b8022012-03-05 13:15:05 -08001534/**
1535 * cfq_init_cfqg_base - initialize base part of a cfq_group
1536 * @cfqg: cfq_group to initialize
1537 *
1538 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1539 * is enabled or not.
1540 */
1541static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1542{
1543 struct cfq_rb_root *st;
1544 int i, j;
1545
1546 for_each_cfqg_st(cfqg, i, j, st)
1547 *st = CFQ_RB_ROOT;
1548 RB_CLEAR_NODE(&cfqg->rb_node);
1549
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06001550 cfqg->ttime.last_end_request = ktime_get_ns();
Tejun Heof51b8022012-03-05 13:15:05 -08001551}
1552
Vivek Goyal25fb5162009-12-03 12:59:46 -05001553#ifdef CONFIG_CFQ_GROUP_IOSCHED
Tejun Heo69d7fde2015-08-18 14:55:36 -07001554static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1555 bool on_dfl, bool reset_dev, bool is_leaf_weight);
1556
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001557static void cfqg_stats_exit(struct cfqg_stats *stats)
Peter Zijlstra90d38392013-11-12 19:42:14 -08001558{
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001559 blkg_rwstat_exit(&stats->merged);
1560 blkg_rwstat_exit(&stats->service_time);
1561 blkg_rwstat_exit(&stats->wait_time);
1562 blkg_rwstat_exit(&stats->queued);
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001563 blkg_stat_exit(&stats->time);
1564#ifdef CONFIG_DEBUG_BLK_CGROUP
1565 blkg_stat_exit(&stats->unaccounted_time);
1566 blkg_stat_exit(&stats->avg_queue_size_sum);
1567 blkg_stat_exit(&stats->avg_queue_size_samples);
1568 blkg_stat_exit(&stats->dequeue);
1569 blkg_stat_exit(&stats->group_wait_time);
1570 blkg_stat_exit(&stats->idle_time);
1571 blkg_stat_exit(&stats->empty_time);
1572#endif
1573}
1574
1575static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1576{
Tejun Heo77ea7332015-08-18 14:55:24 -07001577 if (blkg_rwstat_init(&stats->merged, gfp) ||
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001578 blkg_rwstat_init(&stats->service_time, gfp) ||
1579 blkg_rwstat_init(&stats->wait_time, gfp) ||
1580 blkg_rwstat_init(&stats->queued, gfp) ||
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001581 blkg_stat_init(&stats->time, gfp))
1582 goto err;
Peter Zijlstra90d38392013-11-12 19:42:14 -08001583
1584#ifdef CONFIG_DEBUG_BLK_CGROUP
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001585 if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1586 blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1587 blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1588 blkg_stat_init(&stats->dequeue, gfp) ||
1589 blkg_stat_init(&stats->group_wait_time, gfp) ||
1590 blkg_stat_init(&stats->idle_time, gfp) ||
1591 blkg_stat_init(&stats->empty_time, gfp))
1592 goto err;
Peter Zijlstra90d38392013-11-12 19:42:14 -08001593#endif
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001594 return 0;
1595err:
1596 cfqg_stats_exit(stats);
1597 return -ENOMEM;
Peter Zijlstra90d38392013-11-12 19:42:14 -08001598}
1599
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001600static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1601{
1602 struct cfq_group_data *cgd;
1603
Tejun Heoebc4ff62016-11-10 11:16:37 -05001604 cgd = kzalloc(sizeof(*cgd), gfp);
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001605 if (!cgd)
1606 return NULL;
1607 return &cgd->cpd;
1608}
1609
Tejun Heo81437642015-08-18 14:55:15 -07001610static void cfq_cpd_init(struct blkcg_policy_data *cpd)
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001611{
Tejun Heo81437642015-08-18 14:55:15 -07001612 struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
Tejun Heo9e10a132015-09-18 11:56:28 -04001613 unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
Tejun Heo69d7fde2015-08-18 14:55:36 -07001614 CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001615
Tejun Heo69d7fde2015-08-18 14:55:36 -07001616 if (cpd_to_blkcg(cpd) == &blkcg_root)
1617 weight *= 2;
1618
1619 cgd->weight = weight;
1620 cgd->leaf_weight = weight;
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001621}
1622
Tejun Heoe4a9bde2015-08-18 14:55:16 -07001623static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1624{
1625 kfree(cpd_to_cfqgd(cpd));
1626}
1627
Tejun Heo69d7fde2015-08-18 14:55:36 -07001628static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
1629{
1630 struct blkcg *blkcg = cpd_to_blkcg(cpd);
Tejun Heo9e10a132015-09-18 11:56:28 -04001631 bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
Tejun Heo69d7fde2015-08-18 14:55:36 -07001632 unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1633
1634 if (blkcg == &blkcg_root)
1635 weight *= 2;
1636
1637 WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
1638 WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
1639}
1640
Tejun Heo001bea72015-08-18 14:55:11 -07001641static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1642{
Tejun Heob2ce2642015-08-18 14:55:13 -07001643 struct cfq_group *cfqg;
1644
1645 cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1646 if (!cfqg)
1647 return NULL;
1648
1649 cfq_init_cfqg_base(cfqg);
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001650 if (cfqg_stats_init(&cfqg->stats, gfp)) {
1651 kfree(cfqg);
1652 return NULL;
1653 }
Tejun Heob2ce2642015-08-18 14:55:13 -07001654
1655 return &cfqg->pd;
Tejun Heo001bea72015-08-18 14:55:11 -07001656}
1657
Tejun Heoa9520cd2015-08-18 14:55:14 -07001658static void cfq_pd_init(struct blkg_policy_data *pd)
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001659{
Tejun Heoa9520cd2015-08-18 14:55:14 -07001660 struct cfq_group *cfqg = pd_to_cfqg(pd);
1661 struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
Vivek Goyal25fb5162009-12-03 12:59:46 -05001662
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001663 cfqg->weight = cgd->weight;
1664 cfqg->leaf_weight = cgd->leaf_weight;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001665}
1666
Tejun Heoa9520cd2015-08-18 14:55:14 -07001667static void cfq_pd_offline(struct blkg_policy_data *pd)
Tejun Heo0b399202013-01-09 08:05:13 -08001668{
Tejun Heoa9520cd2015-08-18 14:55:14 -07001669 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo60a83702015-08-18 14:55:05 -07001670 int i;
1671
1672 for (i = 0; i < IOPRIO_BE_NR; i++) {
1673 if (cfqg->async_cfqq[0][i])
1674 cfq_put_queue(cfqg->async_cfqq[0][i]);
1675 if (cfqg->async_cfqq[1][i])
1676 cfq_put_queue(cfqg->async_cfqq[1][i]);
1677 }
1678
1679 if (cfqg->async_idle_cfqq)
1680 cfq_put_queue(cfqg->async_idle_cfqq);
1681
Tejun Heo0b399202013-01-09 08:05:13 -08001682 /*
1683 * @blkg is going offline and will be ignored by
1684 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
1685 * that they don't get lost. If IOs complete after this point, the
1686 * stats for them will be lost. Oh well...
1687 */
Tejun Heo60a83702015-08-18 14:55:05 -07001688 cfqg_stats_xfer_dead(cfqg);
Tejun Heo0b399202013-01-09 08:05:13 -08001689}
1690
Tejun Heo001bea72015-08-18 14:55:11 -07001691static void cfq_pd_free(struct blkg_policy_data *pd)
1692{
Tejun Heo24bdb8e2015-08-18 14:55:22 -07001693 struct cfq_group *cfqg = pd_to_cfqg(pd);
1694
1695 cfqg_stats_exit(&cfqg->stats);
1696 return kfree(cfqg);
Tejun Heo001bea72015-08-18 14:55:11 -07001697}
1698
Tejun Heoa9520cd2015-08-18 14:55:14 -07001699static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
Tejun Heo689665a2013-01-09 08:05:13 -08001700{
Tejun Heoa9520cd2015-08-18 14:55:14 -07001701 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo689665a2013-01-09 08:05:13 -08001702
1703 cfqg_stats_reset(&cfqg->stats);
Vivek Goyal25fb5162009-12-03 12:59:46 -05001704}
1705
Tejun Heoae118892015-08-18 14:55:20 -07001706static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1707 struct blkcg *blkcg)
Vivek Goyal25fb5162009-12-03 12:59:46 -05001708{
Tejun Heoae118892015-08-18 14:55:20 -07001709 struct blkcg_gq *blkg;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001710
Tejun Heoae118892015-08-18 14:55:20 -07001711 blkg = blkg_lookup(blkcg, cfqd->queue);
1712 if (likely(blkg))
1713 return blkg_to_cfqg(blkg);
1714 return NULL;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001715}
1716
1717static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1718{
Vivek Goyal25fb5162009-12-03 12:59:46 -05001719 cfqq->cfqg = cfqg;
Vivek Goyalb1c35762009-12-03 12:59:47 -05001720 /* cfqq reference on cfqg */
Tejun Heoeb7d8c072012-03-23 14:02:53 +01001721 cfqg_get(cfqg);
Vivek Goyalb1c35762009-12-03 12:59:47 -05001722}
1723
Tejun Heof95a04a2012-04-16 13:57:26 -07001724static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1725 struct blkg_policy_data *pd, int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001726{
Tejun Heof95a04a2012-04-16 13:57:26 -07001727 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo3381cb82012-04-01 14:38:44 -07001728
1729 if (!cfqg->dev_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001730 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001731 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001732}
1733
Tejun Heo2da8ca82013-12-05 12:28:04 -05001734static int cfqg_print_weight_device(struct seq_file *sf, void *v)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001735{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001736 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1737 cfqg_prfill_weight_device, &blkcg_policy_cfq,
1738 0, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001739 return 0;
1740}
1741
Tejun Heoe71357e2013-01-09 08:05:10 -08001742static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1743 struct blkg_policy_data *pd, int off)
1744{
1745 struct cfq_group *cfqg = pd_to_cfqg(pd);
1746
1747 if (!cfqg->dev_leaf_weight)
1748 return 0;
1749 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1750}
1751
Tejun Heo2da8ca82013-12-05 12:28:04 -05001752static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
Tejun Heoe71357e2013-01-09 08:05:10 -08001753{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001754 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1755 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1756 0, false);
Tejun Heoe71357e2013-01-09 08:05:10 -08001757 return 0;
1758}
1759
Tejun Heo2da8ca82013-12-05 12:28:04 -05001760static int cfq_print_weight(struct seq_file *sf, void *v)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001761{
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001762 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
Jens Axboe9470e4a2015-06-19 10:19:36 -06001763 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1764 unsigned int val = 0;
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001765
Jens Axboe9470e4a2015-06-19 10:19:36 -06001766 if (cgd)
1767 val = cgd->weight;
1768
1769 seq_printf(sf, "%u\n", val);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001770 return 0;
1771}
1772
Tejun Heo2da8ca82013-12-05 12:28:04 -05001773static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
Tejun Heoe71357e2013-01-09 08:05:10 -08001774{
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001775 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
Jens Axboe9470e4a2015-06-19 10:19:36 -06001776 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1777 unsigned int val = 0;
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001778
Jens Axboe9470e4a2015-06-19 10:19:36 -06001779 if (cgd)
1780 val = cgd->leaf_weight;
1781
1782 seq_printf(sf, "%u\n", val);
Tejun Heoe71357e2013-01-09 08:05:10 -08001783 return 0;
1784}
1785
Tejun Heo451af502014-05-13 12:16:21 -04001786static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1787 char *buf, size_t nbytes, loff_t off,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001788 bool on_dfl, bool is_leaf_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001789{
Tejun Heo69d7fde2015-08-18 14:55:36 -07001790 unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1791 unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
Tejun Heo451af502014-05-13 12:16:21 -04001792 struct blkcg *blkcg = css_to_blkcg(of_css(of));
Tejun Heo60c2bc22012-04-01 14:38:43 -07001793 struct blkg_conf_ctx ctx;
Tejun Heo3381cb82012-04-01 14:38:44 -07001794 struct cfq_group *cfqg;
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001795 struct cfq_group_data *cfqgd;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001796 int ret;
Tejun Heo36aa9e52015-08-18 14:55:31 -07001797 u64 v;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001798
Tejun Heo3c798392012-04-16 13:57:25 -07001799 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001800 if (ret)
1801 return ret;
1802
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001803 if (sscanf(ctx.body, "%llu", &v) == 1) {
1804 /* require "default" on dfl */
1805 ret = -ERANGE;
1806 if (!v && on_dfl)
1807 goto out_finish;
1808 } else if (!strcmp(strim(ctx.body), "default")) {
1809 v = 0;
1810 } else {
1811 ret = -EINVAL;
Tejun Heo36aa9e52015-08-18 14:55:31 -07001812 goto out_finish;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001813 }
Tejun Heo36aa9e52015-08-18 14:55:31 -07001814
Tejun Heo3381cb82012-04-01 14:38:44 -07001815 cfqg = blkg_to_cfqg(ctx.blkg);
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001816 cfqgd = blkcg_to_cfqgd(blkcg);
Jens Axboeae994ea2015-06-20 10:26:50 -06001817
Tejun Heo20386ce2015-08-18 14:55:28 -07001818 ret = -ERANGE;
Tejun Heo69d7fde2015-08-18 14:55:36 -07001819 if (!v || (v >= min && v <= max)) {
Tejun Heoe71357e2013-01-09 08:05:10 -08001820 if (!is_leaf_weight) {
Tejun Heo36aa9e52015-08-18 14:55:31 -07001821 cfqg->dev_weight = v;
1822 cfqg->new_weight = v ?: cfqgd->weight;
Tejun Heoe71357e2013-01-09 08:05:10 -08001823 } else {
Tejun Heo36aa9e52015-08-18 14:55:31 -07001824 cfqg->dev_leaf_weight = v;
1825 cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
Tejun Heoe71357e2013-01-09 08:05:10 -08001826 }
Tejun Heo60c2bc22012-04-01 14:38:43 -07001827 ret = 0;
1828 }
Tejun Heo36aa9e52015-08-18 14:55:31 -07001829out_finish:
Tejun Heo60c2bc22012-04-01 14:38:43 -07001830 blkg_conf_finish(&ctx);
Tejun Heo451af502014-05-13 12:16:21 -04001831 return ret ?: nbytes;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001832}
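
/*
 * Example writes accepted by the parser above (illustrative; paths
 * assume the legacy cgroup-v1 hierarchy):
 *
 *	# echo "8:16 600" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *	# echo "8:16 default" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *
 * "default" (or 0 on the legacy interface) clears the per-device
 * override so the group-wide weight applies again; non-zero values
 * must fall within [min, max] for the interface in use.
 */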
1833
Tejun Heo451af502014-05-13 12:16:21 -04001834static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1835 char *buf, size_t nbytes, loff_t off)
Tejun Heoe71357e2013-01-09 08:05:10 -08001836{
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001837 return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
Tejun Heoe71357e2013-01-09 08:05:10 -08001838}
1839
Tejun Heo451af502014-05-13 12:16:21 -04001840static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1841 char *buf, size_t nbytes, loff_t off)
Tejun Heoe71357e2013-01-09 08:05:10 -08001842{
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001843 return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
Tejun Heoe71357e2013-01-09 08:05:10 -08001844}
1845
Tejun Heodd165eb2015-08-18 14:55:33 -07001846static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
Tejun Heo69d7fde2015-08-18 14:55:36 -07001847 bool on_dfl, bool reset_dev, bool is_leaf_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001848{
Tejun Heo69d7fde2015-08-18 14:55:36 -07001849 unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1850 unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
Tejun Heo182446d2013-08-08 20:11:24 -04001851 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo3c798392012-04-16 13:57:25 -07001852 struct blkcg_gq *blkg;
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001853 struct cfq_group_data *cfqgd;
Jens Axboeae994ea2015-06-20 10:26:50 -06001854 int ret = 0;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001855
Tejun Heo69d7fde2015-08-18 14:55:36 -07001856 if (val < min || val > max)
1857 return -ERANGE;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001858
1859 spin_lock_irq(&blkcg->lock);
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001860 cfqgd = blkcg_to_cfqgd(blkcg);
Jens Axboeae994ea2015-06-20 10:26:50 -06001861 if (!cfqgd) {
1862 ret = -EINVAL;
1863 goto out;
1864 }
Tejun Heoe71357e2013-01-09 08:05:10 -08001865
1866 if (!is_leaf_weight)
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001867 cfqgd->weight = val;
Tejun Heoe71357e2013-01-09 08:05:10 -08001868 else
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001869 cfqgd->leaf_weight = val;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001870
Sasha Levinb67bfe02013-02-27 17:06:00 -08001871 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
Tejun Heo3381cb82012-04-01 14:38:44 -07001872 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001873
Tejun Heoe71357e2013-01-09 08:05:10 -08001874 if (!cfqg)
1875 continue;
1876
1877 if (!is_leaf_weight) {
Tejun Heo69d7fde2015-08-18 14:55:36 -07001878 if (reset_dev)
1879 cfqg->dev_weight = 0;
Tejun Heoe71357e2013-01-09 08:05:10 -08001880 if (!cfqg->dev_weight)
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001881 cfqg->new_weight = cfqgd->weight;
Tejun Heoe71357e2013-01-09 08:05:10 -08001882 } else {
Tejun Heo69d7fde2015-08-18 14:55:36 -07001883 if (reset_dev)
1884 cfqg->dev_leaf_weight = 0;
Tejun Heoe71357e2013-01-09 08:05:10 -08001885 if (!cfqg->dev_leaf_weight)
Arianna Avanzinie48453c2015-06-05 23:38:42 +02001886 cfqg->new_leaf_weight = cfqgd->leaf_weight;
Tejun Heoe71357e2013-01-09 08:05:10 -08001887 }
Tejun Heo60c2bc22012-04-01 14:38:43 -07001888 }
1889
Jens Axboeae994ea2015-06-20 10:26:50 -06001890out:
Tejun Heo60c2bc22012-04-01 14:38:43 -07001891 spin_unlock_irq(&blkcg->lock);
Jens Axboeae994ea2015-06-20 10:26:50 -06001892 return ret;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001893}
1894
Tejun Heo182446d2013-08-08 20:11:24 -04001895static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1896 u64 val)
Tejun Heoe71357e2013-01-09 08:05:10 -08001897{
Tejun Heo69d7fde2015-08-18 14:55:36 -07001898 return __cfq_set_weight(css, val, false, false, false);
Tejun Heoe71357e2013-01-09 08:05:10 -08001899}
1900
Tejun Heo182446d2013-08-08 20:11:24 -04001901static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1902 struct cftype *cft, u64 val)
Tejun Heoe71357e2013-01-09 08:05:10 -08001903{
Tejun Heo69d7fde2015-08-18 14:55:36 -07001904 return __cfq_set_weight(css, val, false, false, true);
Tejun Heoe71357e2013-01-09 08:05:10 -08001905}
1906
Tejun Heo2da8ca82013-12-05 12:28:04 -05001907static int cfqg_print_stat(struct seq_file *sf, void *v)
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001908{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001909 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1910 &blkcg_policy_cfq, seq_cft(sf)->private, false);
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001911 return 0;
1912}
1913
Tejun Heo2da8ca82013-12-05 12:28:04 -05001914static int cfqg_print_rwstat(struct seq_file *sf, void *v)
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001915{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001916 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1917 &blkcg_policy_cfq, seq_cft(sf)->private, true);
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001918 return 0;
1919}
1920
Tejun Heo43114012013-01-09 08:05:13 -08001921static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1922 struct blkg_policy_data *pd, int off)
1923{
Tejun Heof12c74c2015-08-18 14:55:23 -07001924 u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1925 &blkcg_policy_cfq, off);
Tejun Heo43114012013-01-09 08:05:13 -08001926 return __blkg_prfill_u64(sf, pd, sum);
1927}
1928
1929static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1930 struct blkg_policy_data *pd, int off)
1931{
Tejun Heof12c74c2015-08-18 14:55:23 -07001932 struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1933 &blkcg_policy_cfq, off);
Tejun Heo43114012013-01-09 08:05:13 -08001934 return __blkg_prfill_rwstat(sf, pd, &sum);
1935}
1936
Tejun Heo2da8ca82013-12-05 12:28:04 -05001937static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
Tejun Heo43114012013-01-09 08:05:13 -08001938{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001939 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1940 cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1941 seq_cft(sf)->private, false);
Tejun Heo43114012013-01-09 08:05:13 -08001942 return 0;
1943}
1944
Tejun Heo2da8ca82013-12-05 12:28:04 -05001945static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
Tejun Heo43114012013-01-09 08:05:13 -08001946{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001947 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1948 cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1949 seq_cft(sf)->private, true);
Tejun Heo43114012013-01-09 08:05:13 -08001950 return 0;
1951}
1952
Tejun Heo702747c2015-08-18 14:55:25 -07001953static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1954 int off)
1955{
1956 u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1957
1958 return __blkg_prfill_u64(sf, pd, sum >> 9);
1959}
1960
1961static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1962{
1963 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1964 cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1965 return 0;
1966}
1967
1968static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1969 struct blkg_policy_data *pd, int off)
1970{
1971 struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1972 offsetof(struct blkcg_gq, stat_bytes));
1973 u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1974 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1975
1976 return __blkg_prfill_u64(sf, pd, sum >> 9);
1977}
1978
1979static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1980{
1981 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1982 cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1983 false);
1984 return 0;
1985}
1986
Tejun Heo60c2bc22012-04-01 14:38:43 -07001987#ifdef CONFIG_DEBUG_BLK_CGROUP
Tejun Heof95a04a2012-04-16 13:57:26 -07001988static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1989 struct blkg_policy_data *pd, int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001990{
Tejun Heof95a04a2012-04-16 13:57:26 -07001991 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo155fead2012-04-01 14:38:44 -07001992 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001993 u64 v = 0;
1994
1995 if (samples) {
Tejun Heo155fead2012-04-01 14:38:44 -07001996 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
Anatol Pomozovf3cff252013-09-22 12:43:47 -06001997 v = div64_u64(v, samples);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001998 }
Tejun Heof95a04a2012-04-16 13:57:26 -07001999 __blkg_prfill_u64(sf, pd, v);
Tejun Heo60c2bc22012-04-01 14:38:43 -07002000 return 0;
2001}
2002
2003/* print avg_queue_size */
Tejun Heo2da8ca82013-12-05 12:28:04 -05002004static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
Tejun Heo60c2bc22012-04-01 14:38:43 -07002005{
Tejun Heo2da8ca82013-12-05 12:28:04 -05002006 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
2007 cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
2008 0, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07002009 return 0;
2010}
2011#endif /* CONFIG_DEBUG_BLK_CGROUP */
2012
Tejun Heo880f50e2015-08-18 14:55:30 -07002013static struct cftype cfq_blkcg_legacy_files[] = {
Tejun Heo1d3650f2013-01-09 08:05:11 -08002014 /* on root, weight is mapped to leaf_weight */
Tejun Heo60c2bc22012-04-01 14:38:43 -07002015 {
2016 .name = "weight_device",
Tejun Heo1d3650f2013-01-09 08:05:11 -08002017 .flags = CFTYPE_ONLY_ON_ROOT,
Tejun Heo2da8ca82013-12-05 12:28:04 -05002018 .seq_show = cfqg_print_leaf_weight_device,
Tejun Heo451af502014-05-13 12:16:21 -04002019 .write = cfqg_set_leaf_weight_device,
Tejun Heo1d3650f2013-01-09 08:05:11 -08002020 },
2021 {
2022 .name = "weight",
2023 .flags = CFTYPE_ONLY_ON_ROOT,
Tejun Heo2da8ca82013-12-05 12:28:04 -05002024 .seq_show = cfq_print_leaf_weight,
Tejun Heo1d3650f2013-01-09 08:05:11 -08002025 .write_u64 = cfq_set_leaf_weight,
2026 },
2027
2028 /* no such mapping necessary for !roots */
2029 {
2030 .name = "weight_device",
2031 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo2da8ca82013-12-05 12:28:04 -05002032 .seq_show = cfqg_print_weight_device,
Tejun Heo451af502014-05-13 12:16:21 -04002033 .write = cfqg_set_weight_device,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002034 },
2035 {
2036 .name = "weight",
Tejun Heoe71357e2013-01-09 08:05:10 -08002037 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo2da8ca82013-12-05 12:28:04 -05002038 .seq_show = cfq_print_weight,
Tejun Heo3381cb82012-04-01 14:38:44 -07002039 .write_u64 = cfq_set_weight,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002040 },
Tejun Heo1d3650f2013-01-09 08:05:11 -08002041
2042 {
2043 .name = "leaf_weight_device",
Tejun Heo2da8ca82013-12-05 12:28:04 -05002044 .seq_show = cfqg_print_leaf_weight_device,
Tejun Heo451af502014-05-13 12:16:21 -04002045 .write = cfqg_set_leaf_weight_device,
Tejun Heoe71357e2013-01-09 08:05:10 -08002046 },
2047 {
2048 .name = "leaf_weight",
Tejun Heo2da8ca82013-12-05 12:28:04 -05002049 .seq_show = cfq_print_leaf_weight,
Tejun Heoe71357e2013-01-09 08:05:10 -08002050 .write_u64 = cfq_set_leaf_weight,
2051 },
2052
Tejun Heo43114012013-01-09 08:05:13 -08002053	/* statistics, covering only the tasks in the cfqg */
Tejun Heo60c2bc22012-04-01 14:38:43 -07002054 {
2055 .name = "time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002056 .private = offsetof(struct cfq_group, stats.time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002057 .seq_show = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002058 },
2059 {
2060 .name = "sectors",
Tejun Heo702747c2015-08-18 14:55:25 -07002061 .seq_show = cfqg_print_stat_sectors,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002062 },
2063 {
2064 .name = "io_service_bytes",
Tejun Heo77ea7332015-08-18 14:55:24 -07002065 .private = (unsigned long)&blkcg_policy_cfq,
2066 .seq_show = blkg_print_stat_bytes,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002067 },
2068 {
2069 .name = "io_serviced",
Tejun Heo77ea7332015-08-18 14:55:24 -07002070 .private = (unsigned long)&blkcg_policy_cfq,
2071 .seq_show = blkg_print_stat_ios,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002072 },
2073 {
2074 .name = "io_service_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002075 .private = offsetof(struct cfq_group, stats.service_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002076 .seq_show = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002077 },
2078 {
2079 .name = "io_wait_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002080 .private = offsetof(struct cfq_group, stats.wait_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002081 .seq_show = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002082 },
2083 {
2084 .name = "io_merged",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002085 .private = offsetof(struct cfq_group, stats.merged),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002086 .seq_show = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002087 },
2088 {
2089 .name = "io_queued",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002090 .private = offsetof(struct cfq_group, stats.queued),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002091 .seq_show = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002092 },
Tejun Heo43114012013-01-09 08:05:13 -08002093
2094	/* the same statistics which cover the cfqg and its descendants */
2095 {
2096 .name = "time_recursive",
2097 .private = offsetof(struct cfq_group, stats.time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002098 .seq_show = cfqg_print_stat_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002099 },
2100 {
2101 .name = "sectors_recursive",
Tejun Heo702747c2015-08-18 14:55:25 -07002102 .seq_show = cfqg_print_stat_sectors_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002103 },
2104 {
2105 .name = "io_service_bytes_recursive",
Tejun Heo77ea7332015-08-18 14:55:24 -07002106 .private = (unsigned long)&blkcg_policy_cfq,
2107 .seq_show = blkg_print_stat_bytes_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002108 },
2109 {
2110 .name = "io_serviced_recursive",
Tejun Heo77ea7332015-08-18 14:55:24 -07002111 .private = (unsigned long)&blkcg_policy_cfq,
2112 .seq_show = blkg_print_stat_ios_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002113 },
2114 {
2115 .name = "io_service_time_recursive",
2116 .private = offsetof(struct cfq_group, stats.service_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002117 .seq_show = cfqg_print_rwstat_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002118 },
2119 {
2120 .name = "io_wait_time_recursive",
2121 .private = offsetof(struct cfq_group, stats.wait_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002122 .seq_show = cfqg_print_rwstat_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002123 },
2124 {
2125 .name = "io_merged_recursive",
2126 .private = offsetof(struct cfq_group, stats.merged),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002127 .seq_show = cfqg_print_rwstat_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002128 },
2129 {
2130 .name = "io_queued_recursive",
2131 .private = offsetof(struct cfq_group, stats.queued),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002132 .seq_show = cfqg_print_rwstat_recursive,
Tejun Heo43114012013-01-09 08:05:13 -08002133 },
Tejun Heo60c2bc22012-04-01 14:38:43 -07002134#ifdef CONFIG_DEBUG_BLK_CGROUP
2135 {
2136 .name = "avg_queue_size",
Tejun Heo2da8ca82013-12-05 12:28:04 -05002137 .seq_show = cfqg_print_avg_queue_size,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002138 },
2139 {
2140 .name = "group_wait_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002141 .private = offsetof(struct cfq_group, stats.group_wait_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002142 .seq_show = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002143 },
2144 {
2145 .name = "idle_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002146 .private = offsetof(struct cfq_group, stats.idle_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002147 .seq_show = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002148 },
2149 {
2150 .name = "empty_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002151 .private = offsetof(struct cfq_group, stats.empty_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002152 .seq_show = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002153 },
2154 {
2155 .name = "dequeue",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002156 .private = offsetof(struct cfq_group, stats.dequeue),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002157 .seq_show = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002158 },
2159 {
2160 .name = "unaccounted_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002161 .private = offsetof(struct cfq_group, stats.unaccounted_time),
Tejun Heo2da8ca82013-12-05 12:28:04 -05002162 .seq_show = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002163 },
2164#endif /* CONFIG_DEBUG_BLK_CGROUP */
2165 { } /* terminate */
2166};
Tejun Heo2ee867dc2015-08-18 14:55:34 -07002167
2168static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2169{
2170 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2171 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2172
2173 seq_printf(sf, "default %u\n", cgd->weight);
2174 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2175 &blkcg_policy_cfq, 0, false);
2176 return 0;
2177}
2178
2179static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2180 char *buf, size_t nbytes, loff_t off)
2181{
2182 char *endp;
2183 int ret;
2184 u64 v;
2185
2186 buf = strim(buf);
2187
2188 /* "WEIGHT" or "default WEIGHT" sets the default weight */
2189 v = simple_strtoull(buf, &endp, 0);
2190 if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
Tejun Heo69d7fde2015-08-18 14:55:36 -07002191 ret = __cfq_set_weight(of_css(of), v, true, false, false);
Tejun Heo2ee867dc2015-08-18 14:55:34 -07002192 return ret ?: nbytes;
2193 }
2194
2195 /* "MAJ:MIN WEIGHT" */
2196 return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2197}
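
/*
 * Usage sketch for the unified (cgroup v2) interface above
 * (illustrative paths):
 *
 *	# echo "default 300" > /sys/fs/cgroup/grp/io.weight
 *	# echo "8:16 500" > /sys/fs/cgroup/grp/io.weight
 *
 * A bare "WEIGHT" or "default WEIGHT" updates the group-wide default;
 * "MAJ:MIN WEIGHT" falls through to __cfqg_set_weight_device() with
 * on_dfl set.
 */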
2198
2199static struct cftype cfq_blkcg_files[] = {
2200 {
2201 .name = "weight",
2202 .flags = CFTYPE_NOT_ON_ROOT,
2203 .seq_show = cfq_print_weight_on_dfl,
2204 .write = cfq_set_weight_on_dfl,
2205 },
2206 { } /* terminate */
2207};
2208
Vivek Goyal25fb5162009-12-03 12:59:46 -05002209#else /* GROUP_IOSCHED */
Tejun Heoae118892015-08-18 14:55:20 -07002210static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2211 struct blkcg *blkcg)
Vivek Goyal25fb5162009-12-03 12:59:46 -05002212{
Tejun Heof51b8022012-03-05 13:15:05 -08002213 return cfqd->root_group;
Vivek Goyal25fb5162009-12-03 12:59:46 -05002214}
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02002215
Vivek Goyal25fb5162009-12-03 12:59:46 -05002216static inline void
2217cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2218 cfqq->cfqg = cfqg;
2219}
2220
2221#endif /* GROUP_IOSCHED */
2222
Jens Axboe498d3aa22007-04-26 12:54:48 +02002223/*
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002224 * The cfqd->service_trees hold all pending cfq_queues that have
Jens Axboe498d3aa22007-04-26 12:54:48 +02002225 * requests waiting to be processed. It is sorted in the order that
2226 * we will service the queues.
2227 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002228static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboea6151c32009-10-07 20:02:57 +02002229 bool add_front)
Jens Axboed9e76202007-04-20 14:27:50 +02002230{
Jens Axboe08717142008-01-28 11:38:15 +01002231 struct rb_node **p, *parent;
2232 struct cfq_queue *__cfqq;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002233 u64 rb_key;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002234 struct cfq_rb_root *st;
Jens Axboe498d3aa22007-04-26 12:54:48 +02002235 int left;
Vivek Goyaldae739e2009-12-03 12:59:45 -05002236 int new_cfqq = 1;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002237 u64 now = ktime_get_ns();
Vivek Goyalae30c282009-12-03 12:59:55 -05002238
Vivek Goyal34b98d02012-10-03 16:56:58 -04002239 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
Jens Axboe08717142008-01-28 11:38:15 +01002240 if (cfq_class_idle(cfqq)) {
2241 rb_key = CFQ_IDLE_DELAY;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002242 parent = rb_last(&st->rb);
Jens Axboe08717142008-01-28 11:38:15 +01002243 if (parent && parent != &cfqq->rb_node) {
2244 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2245 rb_key += __cfqq->rb_key;
2246 } else
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002247 rb_key += now;
Jens Axboe08717142008-01-28 11:38:15 +01002248 } else if (!add_front) {
Jens Axboeb9c89462009-10-06 20:53:44 +02002249 /*
2250 * Get our rb key offset. Subtract any residual slice
2251 * value carried from last service. A negative resid
2252 * count indicates slice overrun, and this should position
2253 * the next service time further away in the tree.
2254 */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002255 rb_key = cfq_slice_offset(cfqd, cfqq) + now;
Jens Axboeb9c89462009-10-06 20:53:44 +02002256 rb_key -= cfqq->slice_resid;
Jens Axboeedd75ff2007-04-19 12:03:34 +02002257 cfqq->slice_resid = 0;
Corrado Zoccolo48e025e2009-10-05 08:49:23 +02002258 } else {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002259 rb_key = -NSEC_PER_SEC;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002260 __cfqq = cfq_rb_first(st);
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002261 rb_key += __cfqq ? __cfqq->rb_key : now;
Corrado Zoccolo48e025e2009-10-05 08:49:23 +02002262 }
Jens Axboed9e76202007-04-20 14:27:50 +02002263
2264 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
Vivek Goyaldae739e2009-12-03 12:59:45 -05002265 new_cfqq = 0;
Jens Axboe99f96282007-02-05 11:56:25 +01002266 /*
Jens Axboed9e76202007-04-20 14:27:50 +02002267 * same position, nothing more to do
Jens Axboe99f96282007-02-05 11:56:25 +01002268 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002269 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
Jens Axboed9e76202007-04-20 14:27:50 +02002270 return;
Jens Axboe53b037442006-07-28 09:48:51 +02002271
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01002272 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2273 cfqq->service_tree = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02002274 }
Jens Axboed9e76202007-04-20 14:27:50 +02002275
Jens Axboe498d3aa22007-04-26 12:54:48 +02002276 left = 1;
Jens Axboe08717142008-01-28 11:38:15 +01002277 parent = NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002278 cfqq->service_tree = st;
2279 p = &st->rb.rb_node;
Jens Axboed9e76202007-04-20 14:27:50 +02002280 while (*p) {
2281 parent = *p;
2282 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2283
Jens Axboe0c534e02007-04-18 20:01:57 +02002284 /*
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002285		 * sort by key, which represents service time.
Jens Axboe0c534e02007-04-18 20:01:57 +02002286 */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002287 if (rb_key < __cfqq->rb_key)
Vivek Goyal1f23f122012-10-03 16:57:00 -04002288 p = &parent->rb_left;
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002289 else {
Vivek Goyal1f23f122012-10-03 16:57:00 -04002290 p = &parent->rb_right;
Jens Axboecc09e292007-04-26 12:53:50 +02002291 left = 0;
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002292 }
Jens Axboed9e76202007-04-20 14:27:50 +02002293 }
2294
Jens Axboecc09e292007-04-26 12:53:50 +02002295 if (left)
Vivek Goyal34b98d02012-10-03 16:56:58 -04002296 st->left = &cfqq->rb_node;
Jens Axboecc09e292007-04-26 12:53:50 +02002297
Jens Axboed9e76202007-04-20 14:27:50 +02002298 cfqq->rb_key = rb_key;
2299 rb_link_node(&cfqq->rb_node, parent, p);
Vivek Goyal34b98d02012-10-03 16:56:58 -04002300 rb_insert_color(&cfqq->rb_node, &st->rb);
2301 st->count++;
Namhyung Kim20359f22011-05-24 10:23:22 +02002302 if (add_front || !new_cfqq)
Vivek Goyaldae739e2009-12-03 12:59:45 -05002303 return;
Justin TerAvest8184f932011-03-17 16:12:36 +01002304 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305}
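
/*
 * rb_key placement examples for the logic above (illustrative): an
 * idle-class queue is keyed CFQ_IDLE_DELAY past the last idle entry; a
 * normal re-add is keyed now + cfq_slice_offset() minus slice_resid,
 * so a slice overrun (negative resid) pushes the next service further
 * right in the tree; add_front keys the queue one second ahead of the
 * current leftmost entry so it is served next.
 */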
2306
Jens Axboea36e71f2009-04-15 12:15:11 +02002307static struct cfq_queue *
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002308cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2309 sector_t sector, struct rb_node **ret_parent,
2310 struct rb_node ***rb_link)
Jens Axboea36e71f2009-04-15 12:15:11 +02002311{
Jens Axboea36e71f2009-04-15 12:15:11 +02002312 struct rb_node **p, *parent;
2313 struct cfq_queue *cfqq = NULL;
2314
2315 parent = NULL;
2316 p = &root->rb_node;
2317 while (*p) {
2318 struct rb_node **n;
2319
2320 parent = *p;
2321 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2322
2323 /*
2324 * Sort strictly based on sector. Smallest to the left,
2325 * largest to the right.
2326 */
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002327 if (sector > blk_rq_pos(cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002328 n = &(*p)->rb_right;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002329 else if (sector < blk_rq_pos(cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002330 n = &(*p)->rb_left;
2331 else
2332 break;
2333 p = n;
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002334 cfqq = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02002335 }
2336
2337 *ret_parent = parent;
2338 if (rb_link)
2339 *rb_link = p;
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002340 return cfqq;
Jens Axboea36e71f2009-04-15 12:15:11 +02002341}
2342
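
/*
 * Illustrative sketch (assumed, simplified types): the same descent on
 * a plain binary search tree keyed by sector. "struct snode" stands in
 * for cfq_queue/rb_node; the exact-match-or-NULL-plus-parent contract
 * mirrors the lookup above.
 */
#if 0
#include <stddef.h>

struct snode {
	unsigned long long sector;
	struct snode *left, *right;
};

static struct snode *sector_lookup(struct snode *root,
				   unsigned long long sector,
				   struct snode **ret_parent)
{
	struct snode *parent = NULL;

	while (root) {
		if (root->sector == sector)
			break;			/* exact hit */
		parent = root;
		root = sector > root->sector ? root->right : root->left;
	}
	*ret_parent = parent;	/* on a miss, the closest ancestor */
	return root;		/* NULL on a miss */
}
#endif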
2343static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2344{
Jens Axboea36e71f2009-04-15 12:15:11 +02002345 struct rb_node **p, *parent;
2346 struct cfq_queue *__cfqq;
2347
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002348 if (cfqq->p_root) {
2349 rb_erase(&cfqq->p_node, cfqq->p_root);
2350 cfqq->p_root = NULL;
2351 }
Jens Axboea36e71f2009-04-15 12:15:11 +02002352
2353 if (cfq_class_idle(cfqq))
2354 return;
2355 if (!cfqq->next_rq)
2356 return;
2357
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002358 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002359 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2360 blk_rq_pos(cfqq->next_rq), &parent, &p);
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002361 if (!__cfqq) {
2362 rb_link_node(&cfqq->p_node, parent, p);
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002363 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2364 } else
2365 cfqq->p_root = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02002366}
2367
Jens Axboe498d3aa22007-04-26 12:54:48 +02002368/*
2369 * Update cfqq's position in the service tree.
2370 */
Jens Axboeedd75ff2007-04-19 12:03:34 +02002371static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002372{
Jens Axboe6d048f52007-04-25 12:44:27 +02002373 /*
2374 * Resorting requires the cfqq to be on the RR list already.
2375 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002376 if (cfq_cfqq_on_rr(cfqq)) {
Jens Axboeedd75ff2007-04-19 12:03:34 +02002377 cfq_service_tree_add(cfqd, cfqq, 0);
Jens Axboea36e71f2009-04-15 12:15:11 +02002378 cfq_prio_tree_add(cfqd, cfqq);
2379 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002380}
2381
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382/*
2383 * add to the busy list of queues for service, trying to be fair in ordering
Jens Axboe22e2c502005-06-27 10:55:12 +02002384 * the pending list according to the last request serviced
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 */
Jens Axboefebffd62008-01-28 13:19:43 +01002386static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387{
Jens Axboe7b679132008-05-30 12:23:07 +02002388 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
Jens Axboe3b181522005-06-27 10:56:24 +02002389 BUG_ON(cfq_cfqq_on_rr(cfqq));
2390 cfq_mark_cfqq_on_rr(cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 cfqd->busy_queues++;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002392 if (cfq_cfqq_sync(cfqq))
2393 cfqd->busy_sync_queues++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394
Jens Axboeedd75ff2007-04-19 12:03:34 +02002395 cfq_resort_rr_list(cfqd, cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396}
2397
Jens Axboe498d3aa22007-04-26 12:54:48 +02002398/*
2399 * Called when the cfqq no longer has requests pending, remove it from
2400 * the service tree.
2401 */
Jens Axboefebffd62008-01-28 13:19:43 +01002402static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403{
Jens Axboe7b679132008-05-30 12:23:07 +02002404 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
Jens Axboe3b181522005-06-27 10:56:24 +02002405 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2406 cfq_clear_cfqq_on_rr(cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01002408 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2409 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2410 cfqq->service_tree = NULL;
2411 }
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002412 if (cfqq->p_root) {
2413 rb_erase(&cfqq->p_node, cfqq->p_root);
2414 cfqq->p_root = NULL;
2415 }
Jens Axboed9e76202007-04-20 14:27:50 +02002416
Justin TerAvest8184f932011-03-17 16:12:36 +01002417 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 BUG_ON(!cfqd->busy_queues);
2419 cfqd->busy_queues--;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002420 if (cfq_cfqq_sync(cfqq))
2421 cfqd->busy_sync_queues--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422}
2423
2424/*
2425 * rb tree support functions
2426 */
Jens Axboefebffd62008-01-28 13:19:43 +01002427static void cfq_del_rq_rb(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428{
Jens Axboe5e705372006-07-13 12:39:25 +02002429 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe5e705372006-07-13 12:39:25 +02002430 const int sync = rq_is_sync(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431
Jens Axboeb4878f22005-10-20 16:42:29 +02002432 BUG_ON(!cfqq->queued[sync]);
2433 cfqq->queued[sync]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434
Jens Axboe5e705372006-07-13 12:39:25 +02002435 elv_rb_del(&cfqq->sort_list, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436
Vivek Goyalf04a6422009-12-03 12:59:40 -05002437 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2438 /*
2439 * Queue will be deleted from service tree when we actually
2440 * expire it later. Right now just remove it from prio tree
2441 * as it is empty.
2442 */
2443 if (cfqq->p_root) {
2444 rb_erase(&cfqq->p_node, cfqq->p_root);
2445 cfqq->p_root = NULL;
2446 }
2447 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448}
2449
Jens Axboe5e705372006-07-13 12:39:25 +02002450static void cfq_add_rq_rb(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451{
Jens Axboe5e705372006-07-13 12:39:25 +02002452 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 struct cfq_data *cfqd = cfqq->cfqd;
Jeff Moyer796d5112011-06-02 21:19:05 +02002454 struct request *prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
Jens Axboe5380a102006-07-13 12:37:56 +02002456 cfqq->queued[rq_is_sync(rq)]++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457
Jeff Moyer796d5112011-06-02 21:19:05 +02002458 elv_rb_add(&cfqq->sort_list, rq);
Jens Axboe5fccbf62006-10-31 14:21:55 +01002459
2460 if (!cfq_cfqq_on_rr(cfqq))
2461 cfq_add_cfqq_rr(cfqd, cfqq);
Jens Axboe5044eed2007-04-25 11:53:48 +02002462
2463 /*
2464 * check if this request is a better next-serve candidate
2465 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002466 prev = cfqq->next_rq;
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002467 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
Jens Axboea36e71f2009-04-15 12:15:11 +02002468
2469 /*
2470 * adjust priority tree position, if ->next_rq changes
2471 */
2472 if (prev != cfqq->next_rq)
2473 cfq_prio_tree_add(cfqd, cfqq);
2474
Jens Axboe5044eed2007-04-25 11:53:48 +02002475 BUG_ON(!cfqq->next_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476}
2477
Jens Axboefebffd62008-01-28 13:19:43 +01002478static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479{
Jens Axboe5380a102006-07-13 12:37:56 +02002480 elv_rb_del(&cfqq->sort_list, rq);
2481 cfqq->queued[rq_is_sync(rq)]--;
Christoph Hellwigef295ec2016-10-28 08:48:16 -06002482 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
Jens Axboe5e705372006-07-13 12:39:25 +02002483 cfq_add_rq_rb(rq);
Tejun Heo155fead2012-04-01 14:38:44 -07002484 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
Christoph Hellwigef295ec2016-10-28 08:48:16 -06002485 rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486}
2487
Jens Axboe206dc692006-03-28 13:03:44 +02002488static struct request *
2489cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490{
Jens Axboe206dc692006-03-28 13:03:44 +02002491 struct task_struct *tsk = current;
Tejun Heoc5869802011-12-14 00:33:41 +01002492 struct cfq_io_cq *cic;
Jens Axboe206dc692006-03-28 13:03:44 +02002493 struct cfq_queue *cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494
Jens Axboe4ac845a2008-01-24 08:44:49 +01002495 cic = cfq_cic_lookup(cfqd, tsk->io_context);
Vasily Tarasov91fac312007-04-25 12:29:51 +02002496 if (!cic)
2497 return NULL;
2498
Christoph Hellwigaa39ebd2016-11-01 07:40:02 -06002499 cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
Kent Overstreetf73a1c72012-09-25 15:05:12 -07002500 if (cfqq)
2501 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 return NULL;
2504}
2505
Jens Axboe165125e2007-07-24 09:28:11 +02002506static void cfq_activate_request(struct request_queue *q, struct request *rq)
Jens Axboeb4878f22005-10-20 16:42:29 +02002507{
2508 struct cfq_data *cfqd = q->elevator->elevator_data;
2509
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002510 cfqd->rq_in_driver++;
Jens Axboe7b679132008-05-30 12:23:07 +02002511 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002512 cfqd->rq_in_driver);
Jens Axboe25776e32006-06-01 10:12:26 +02002513
Tejun Heo5b936292009-05-07 22:24:38 +09002514 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02002515}
2516
Jens Axboe165125e2007-07-24 09:28:11 +02002517static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518{
Jens Axboe22e2c502005-06-27 10:55:12 +02002519 struct cfq_data *cfqd = q->elevator->elevator_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002521 WARN_ON(!cfqd->rq_in_driver);
2522 cfqd->rq_in_driver--;
Jens Axboe7b679132008-05-30 12:23:07 +02002523 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002524 cfqd->rq_in_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525}
2526
Jens Axboeb4878f22005-10-20 16:42:29 +02002527static void cfq_remove_request(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528{
Jens Axboe5e705372006-07-13 12:39:25 +02002529 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe21183b02006-07-13 12:33:14 +02002530
Jens Axboe5e705372006-07-13 12:39:25 +02002531 if (cfqq->next_rq == rq)
2532 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533
Jens Axboeb4878f22005-10-20 16:42:29 +02002534 list_del_init(&rq->queuelist);
Jens Axboe5e705372006-07-13 12:39:25 +02002535 cfq_del_rq_rb(rq);
Jens Axboe374f84a2006-07-23 01:42:19 +02002536
Aaron Carroll45333d52008-08-26 15:52:36 +02002537 cfqq->cfqd->rq_queued--;
Christoph Hellwigef295ec2016-10-28 08:48:16 -06002538 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
Christoph Hellwig65299a32011-08-23 14:50:29 +02002539 if (rq->cmd_flags & REQ_PRIO) {
2540 WARN_ON(!cfqq->prio_pending);
2541 cfqq->prio_pending--;
Jens Axboeb53d1ed2011-08-19 08:34:48 +02002542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543}
2544
Christoph Hellwig34fe7c02017-02-08 14:46:48 +01002545static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
Jens Axboe165125e2007-07-24 09:28:11 +02002546 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547{
2548 struct cfq_data *cfqd = q->elevator->elevator_data;
2549 struct request *__rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
Jens Axboe206dc692006-03-28 13:03:44 +02002551 __rq = cfq_find_rq_fmerge(cfqd, bio);
Tahsin Erdogan72ef7992016-07-07 11:48:22 -07002552 if (__rq && elv_bio_merge_ok(__rq, bio)) {
Jens Axboe98170642006-07-28 09:23:08 +02002553 *req = __rq;
2554 return ELEVATOR_FRONT_MERGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 }
2556
2557 return ELEVATOR_NO_MERGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558}
2559
Jens Axboe165125e2007-07-24 09:28:11 +02002560static void cfq_merged_request(struct request_queue *q, struct request *req,
Christoph Hellwig34fe7c02017-02-08 14:46:48 +01002561 enum elv_merge type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562{
Jens Axboe21183b02006-07-13 12:33:14 +02002563 if (type == ELEVATOR_FRONT_MERGE) {
Jens Axboe5e705372006-07-13 12:39:25 +02002564 struct cfq_queue *cfqq = RQ_CFQQ(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565
Jens Axboe5e705372006-07-13 12:39:25 +02002566 cfq_reposition_rq_rb(cfqq, req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568}
2569
Divyesh Shah812d4022010-04-08 21:14:23 -07002570static void cfq_bio_merged(struct request_queue *q, struct request *req,
2571 struct bio *bio)
2572{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06002573 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
Divyesh Shah812d4022010-04-08 21:14:23 -07002574}
2575
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576static void
Jens Axboe165125e2007-07-24 09:28:11 +02002577cfq_merged_requests(struct request_queue *q, struct request *rq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578 struct request *next)
2579{
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002580 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Shaohua Li4a0b75c2011-12-16 14:00:22 +01002581 struct cfq_data *cfqd = q->elevator->elevator_data;
2582
Jens Axboe22e2c502005-06-27 10:55:12 +02002583 /*
2584 * reposition in fifo if next is older than rq
2585 */
2586 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002587 next->fifo_time < rq->fifo_time &&
Shaohua Li3d106fba2012-11-06 12:39:51 +01002588 cfqq == RQ_CFQQ(next)) {
Jens Axboe22e2c502005-06-27 10:55:12 +02002589 list_move(&rq->queuelist, &next->queuelist);
Jan Kara8b4922d2014-02-24 16:39:52 +01002590 rq->fifo_time = next->fifo_time;
Jens Axboe30996f42009-10-05 11:03:39 +02002591 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002592
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002593 if (cfqq->next_rq == next)
2594 cfqq->next_rq = rq;
Jens Axboeb4878f22005-10-20 16:42:29 +02002595 cfq_remove_request(next);
Christoph Hellwigef295ec2016-10-28 08:48:16 -06002596 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
Shaohua Li4a0b75c2011-12-16 14:00:22 +01002597
2598 cfqq = RQ_CFQQ(next);
2599 /*
2600	 * all requests of this queue have been merged into other queues; delete it
2601	 * from the service tree. If it's the active_queue,
2602	 * cfq_dispatch_requests() will choose to expire it or keep idling
2603 */
2604 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2605 cfqq != cfqd->active_queue)
2606 cfq_del_cfqq_rr(cfqd, cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002607}
2608
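
/*
 * Illustrative sketch (invented helper name): when two requests merge,
 * the survivor keeps the older (smaller) absolute deadline, so merging
 * can never push I/O back in the fifo. Plain scalars stand in for the
 * request fields.
 */
#if 0
static unsigned long long merged_fifo_time(unsigned long long rq_time_ns,
					   unsigned long long next_time_ns)
{
	return next_time_ns < rq_time_ns ? next_time_ns : rq_time_ns;
}
/* merged_fifo_time(120, 80) == 80: the merged request keeps t=80 */
#endif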
Tahsin Erdogan72ef7992016-07-07 11:48:22 -07002609static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2610 struct bio *bio)
Jens Axboeda775262006-12-20 11:04:12 +01002611{
2612 struct cfq_data *cfqd = q->elevator->elevator_data;
Christoph Hellwigaa39ebd2016-11-01 07:40:02 -06002613 bool is_sync = op_is_sync(bio->bi_opf);
Tejun Heoc5869802011-12-14 00:33:41 +01002614 struct cfq_io_cq *cic;
Jens Axboeda775262006-12-20 11:04:12 +01002615 struct cfq_queue *cfqq;
Jens Axboeda775262006-12-20 11:04:12 +01002616
2617 /*
Jens Axboeec8acb62007-01-02 18:32:11 +01002618 * Disallow merge of a sync bio into an async request.
Jens Axboeda775262006-12-20 11:04:12 +01002619 */
Christoph Hellwigaa39ebd2016-11-01 07:40:02 -06002620 if (is_sync && !rq_is_sync(rq))
Jens Axboea6151c32009-10-07 20:02:57 +02002621 return false;
Jens Axboeda775262006-12-20 11:04:12 +01002622
2623 /*
Tejun Heof1a4f4d2011-12-14 00:33:39 +01002624	 * Look up the cfqq that this bio will be queued with and allow
Tejun Heo07c2bd32012-02-08 09:19:42 +01002625 * merge only if rq is queued there.
Jens Axboeda775262006-12-20 11:04:12 +01002626 */
Tejun Heo07c2bd32012-02-08 09:19:42 +01002627 cic = cfq_cic_lookup(cfqd, current->io_context);
2628 if (!cic)
2629 return false;
Jens Axboe719d3402006-12-22 09:38:53 +01002630
Christoph Hellwigaa39ebd2016-11-01 07:40:02 -06002631 cfqq = cic_to_cfqq(cic, is_sync);
Jens Axboea6151c32009-10-07 20:02:57 +02002632 return cfqq == RQ_CFQQ(rq);
Jens Axboeda775262006-12-20 11:04:12 +01002633}
2634
Tahsin Erdogan72ef7992016-07-07 11:48:22 -07002635static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
2636 struct request *next)
2637{
2638 return RQ_CFQQ(rq) == RQ_CFQQ(next);
2639}
2640
Divyesh Shah812df482010-04-08 21:15:35 -07002641static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2642{
Jan Kara91148322016-06-08 15:11:39 +02002643 hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
Tejun Heo155fead2012-04-01 14:38:44 -07002644 cfqg_stats_update_idle_time(cfqq->cfqg);
Divyesh Shah812df482010-04-08 21:15:35 -07002645}
2646
Jens Axboefebffd62008-01-28 13:19:43 +01002647static void __cfq_set_active_queue(struct cfq_data *cfqd,
2648 struct cfq_queue *cfqq)
Jens Axboe22e2c502005-06-27 10:55:12 +02002649{
2650 if (cfqq) {
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002651 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002652 cfqd->serving_wl_class, cfqd->serving_wl_type);
Tejun Heo155fead2012-04-01 14:38:44 -07002653 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
Justin TerAvest62a37f62011-03-23 08:25:44 +01002654 cfqq->slice_start = 0;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002655 cfqq->dispatch_start = ktime_get_ns();
Justin TerAvest62a37f62011-03-23 08:25:44 +01002656 cfqq->allocated_slice = 0;
2657 cfqq->slice_end = 0;
2658 cfqq->slice_dispatch = 0;
2659 cfqq->nr_sectors = 0;
2660
2661 cfq_clear_cfqq_wait_request(cfqq);
2662 cfq_clear_cfqq_must_dispatch(cfqq);
2663 cfq_clear_cfqq_must_alloc_slice(cfqq);
2664 cfq_clear_cfqq_fifo_expire(cfqq);
2665 cfq_mark_cfqq_slice_new(cfqq);
2666
2667 cfq_del_timer(cfqd, cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002668 }
2669
2670 cfqd->active_queue = cfqq;
2671}
2672
2673/*
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002674 * current cfqq expired its slice (or was too idle), select new one
2675 */
2676static void
2677__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Vivek Goyale5ff0822010-04-26 19:25:11 +02002678 bool timed_out)
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002679{
Jens Axboe7b679132008-05-30 12:23:07 +02002680 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2681
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002682 if (cfq_cfqq_wait_request(cfqq))
Divyesh Shah812df482010-04-08 21:15:35 -07002683 cfq_del_timer(cfqd, cfqq);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002684
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002685 cfq_clear_cfqq_wait_request(cfqq);
Vivek Goyalf75edf22009-12-03 12:59:53 -05002686 cfq_clear_cfqq_wait_busy(cfqq);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002687
2688 /*
Shaohua Liae54abe2010-02-05 13:11:45 +01002689 * If this cfqq is shared between multiple processes, check to
2690 * make sure that those processes are still issuing I/Os within
2691 * the mean seek distance. If not, it may be time to break the
2692 * queues apart again.
2693 */
2694 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2695 cfq_mark_cfqq_split_coop(cfqq);
2696
2697 /*
Jens Axboe6084cdd2007-04-23 08:25:00 +02002698 * store what was left of this slice, if the queue idled/timed out
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002699 */
Shaohua Lic553f8e2011-01-14 08:41:03 +01002700 if (timed_out) {
2701 if (cfq_cfqq_slice_new(cfqq))
Vivek Goyalba5bd522011-01-19 08:25:02 -07002702 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
Shaohua Lic553f8e2011-01-14 08:41:03 +01002703 else
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002704 cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
Jan Kara93fdf142016-06-28 09:04:00 +02002705 cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
Jens Axboe7b679132008-05-30 12:23:07 +02002706 }
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002707
Vivek Goyale5ff0822010-04-26 19:25:11 +02002708 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
Vivek Goyaldae739e2009-12-03 12:59:45 -05002709
Vivek Goyalf04a6422009-12-03 12:59:40 -05002710 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2711 cfq_del_cfqq_rr(cfqd, cfqq);
2712
Jens Axboeedd75ff2007-04-19 12:03:34 +02002713 cfq_resort_rr_list(cfqd, cfqq);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002714
2715 if (cfqq == cfqd->active_queue)
2716 cfqd->active_queue = NULL;
2717
2718 if (cfqd->active_cic) {
Tejun Heo11a31222012-02-07 07:51:30 +01002719 put_io_context(cfqd->active_cic->icq.ioc);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002720 cfqd->active_cic = NULL;
2721 }
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002722}
2723
Vivek Goyale5ff0822010-04-26 19:25:11 +02002724static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002725{
2726 struct cfq_queue *cfqq = cfqd->active_queue;
2727
2728 if (cfqq)
Vivek Goyale5ff0822010-04-26 19:25:11 +02002729 __cfq_slice_expired(cfqd, cfqq, timed_out);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002730}
2731
Jens Axboe498d3aa22007-04-26 12:54:48 +02002732/*
2733 * Get next queue for service. Unless we have a queue preemption,
2734 * we'll simply select the first cfqq in the service tree.
2735 */
Jens Axboe6d048f52007-04-25 12:44:27 +02002736static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002737{
Vivek Goyal34b98d02012-10-03 16:56:58 -04002738 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2739 cfqd->serving_wl_class, cfqd->serving_wl_type);
Jens Axboeedd75ff2007-04-19 12:03:34 +02002740
Vivek Goyalf04a6422009-12-03 12:59:40 -05002741 if (!cfqd->rq_queued)
2742 return NULL;
2743
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002744 /* There is nothing to dispatch */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002745 if (!st)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002746 return NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002747 if (RB_EMPTY_ROOT(&st->rb))
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002748 return NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002749 return cfq_rb_first(st);
Jens Axboe6d048f52007-04-25 12:44:27 +02002750}
2751
Vivek Goyalf04a6422009-12-03 12:59:40 -05002752static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2753{
Vivek Goyal25fb5162009-12-03 12:59:46 -05002754 struct cfq_group *cfqg;
Vivek Goyalf04a6422009-12-03 12:59:40 -05002755 struct cfq_queue *cfqq;
2756 int i, j;
2757 struct cfq_rb_root *st;
2758
2759 if (!cfqd->rq_queued)
2760 return NULL;
2761
Vivek Goyal25fb5162009-12-03 12:59:46 -05002762 cfqg = cfq_get_next_cfqg(cfqd);
2763 if (!cfqg)
2764 return NULL;
2765
Markus Elfring1cf41752017-01-21 22:44:07 +01002766 for_each_cfqg_st(cfqg, i, j, st) {
2767 cfqq = cfq_rb_first(st);
2768 if (cfqq)
Vivek Goyalf04a6422009-12-03 12:59:40 -05002769 return cfqq;
Markus Elfring1cf41752017-01-21 22:44:07 +01002770 }
Vivek Goyalf04a6422009-12-03 12:59:40 -05002771 return NULL;
2772}
2773
Jens Axboe498d3aa22007-04-26 12:54:48 +02002774/*
2775 * Get and set a new active queue for service.
2776 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002777static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2778 struct cfq_queue *cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002779{
Jens Axboee00ef792009-11-04 08:54:55 +01002780 if (!cfqq)
Jens Axboea36e71f2009-04-15 12:15:11 +02002781 cfqq = cfq_get_next_queue(cfqd);
Jens Axboe6d048f52007-04-25 12:44:27 +02002782
Jens Axboe22e2c502005-06-27 10:55:12 +02002783 __cfq_set_active_queue(cfqd, cfqq);
Jens Axboe3b181522005-06-27 10:56:24 +02002784 return cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02002785}
2786
Jens Axboed9e76202007-04-20 14:27:50 +02002787static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2788 struct request *rq)
2789{
Tejun Heo83096eb2009-05-07 22:24:39 +09002790 if (blk_rq_pos(rq) >= cfqd->last_position)
2791 return blk_rq_pos(rq) - cfqd->last_position;
Jens Axboed9e76202007-04-20 14:27:50 +02002792 else
Tejun Heo83096eb2009-05-07 22:24:39 +09002793 return cfqd->last_position - blk_rq_pos(rq);
Jens Axboed9e76202007-04-20 14:27:50 +02002794}
2795
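
/*
 * Illustrative sketch (invented helper name): the branch above is an
 * absolute difference on unsigned sector numbers, written out because
 * a naive abs(a - b) would underflow on unsigned types.
 */
#if 0
static unsigned long long sector_dist(unsigned long long last,
				      unsigned long long pos)
{
	return pos >= last ? pos - last : last - pos;
}
/* sector_dist(100, 92) == sector_dist(92, 100) == 8 */
#endif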
Jeff Moyerb2c18e12009-10-23 17:14:49 -04002796static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Shaohua Lie9ce3352010-03-19 08:03:04 +01002797 struct request *rq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002798{
Shaohua Lie9ce3352010-03-19 08:03:04 +01002799 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
Jens Axboe6d048f52007-04-25 12:44:27 +02002800}
2801
Jens Axboea36e71f2009-04-15 12:15:11 +02002802static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2803 struct cfq_queue *cur_cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002804{
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002805 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
Jens Axboea36e71f2009-04-15 12:15:11 +02002806 struct rb_node *parent, *node;
2807 struct cfq_queue *__cfqq;
2808 sector_t sector = cfqd->last_position;
2809
2810 if (RB_EMPTY_ROOT(root))
2811 return NULL;
2812
2813 /*
2814 * First, if we find a request starting at the end of the last
2815 * request, choose it.
2816 */
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002817 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
Jens Axboea36e71f2009-04-15 12:15:11 +02002818 if (__cfqq)
2819 return __cfqq;
2820
2821 /*
2822 * If the exact sector wasn't found, the parent of the NULL leaf
2823 * will contain the closest sector.
2824 */
2825 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
Shaohua Lie9ce3352010-03-19 08:03:04 +01002826 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002827 return __cfqq;
2828
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002829 if (blk_rq_pos(__cfqq->next_rq) < sector)
Jens Axboea36e71f2009-04-15 12:15:11 +02002830 node = rb_next(&__cfqq->p_node);
2831 else
2832 node = rb_prev(&__cfqq->p_node);
2833 if (!node)
2834 return NULL;
2835
2836 __cfqq = rb_entry(node, struct cfq_queue, p_node);
Shaohua Lie9ce3352010-03-19 08:03:04 +01002837 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002838 return __cfqq;
2839
2840 return NULL;
2841}
2842
2843/*
2844 * cfqd - obvious
2845 * cur_cfqq - passed in so that we don't decide that the current queue is
2846 * closely cooperating with itself.
2847 *
2848 * So, basically we're assuming that cur_cfqq has dispatched at least
2849 * one request, and that cfqd->last_position reflects a position on the disk
2850 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2851 * assumption.
2852 */
2853static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
Jeff Moyerb3b6d042009-10-23 17:14:51 -04002854 struct cfq_queue *cur_cfqq)
Jens Axboea36e71f2009-04-15 12:15:11 +02002855{
2856 struct cfq_queue *cfqq;
2857
Divyesh Shah39c01b22010-03-25 15:45:57 +01002858 if (cfq_class_idle(cur_cfqq))
2859 return NULL;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002860 if (!cfq_cfqq_sync(cur_cfqq))
2861 return NULL;
2862 if (CFQQ_SEEKY(cur_cfqq))
2863 return NULL;
2864
Jens Axboea36e71f2009-04-15 12:15:11 +02002865 /*
Gui Jianfengb9d8f4c2009-12-08 08:54:17 +01002866 * Don't search priority tree if it's the only queue in the group.
2867 */
2868 if (cur_cfqq->cfqg->nr_cfqq == 1)
2869 return NULL;
2870
2871 /*
Jens Axboed9e76202007-04-20 14:27:50 +02002872	 * We should notice if some of the queues are cooperating, e.g.
2873	 * working closely on the same area of the disk. In that case,
2874	 * we can group them together and not waste time idling.
Jens Axboe6d048f52007-04-25 12:44:27 +02002875 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002876 cfqq = cfqq_close(cfqd, cur_cfqq);
2877 if (!cfqq)
2878 return NULL;
2879
Vivek Goyal8682e1f2009-12-03 12:59:50 -05002880 /* If new queue belongs to different cfq_group, don't choose it */
2881 if (cur_cfqq->cfqg != cfqq->cfqg)
2882 return NULL;
2883
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002884 /*
2885 * It only makes sense to merge sync queues.
2886 */
2887 if (!cfq_cfqq_sync(cfqq))
2888 return NULL;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002889 if (CFQQ_SEEKY(cfqq))
2890 return NULL;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002891
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002892 /*
2893 * Do not merge queues of different priority classes
2894 */
2895 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2896 return NULL;
2897
Jens Axboea36e71f2009-04-15 12:15:11 +02002898 return cfqq;
Jens Axboe6d048f52007-04-25 12:44:27 +02002899}
2900
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002901/*
2902 * Determine whether we should enforce idle window for this queue.
2903 */
2904
2905static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2906{
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002907 enum wl_class_t wl_class = cfqq_class(cfqq);
Vivek Goyal34b98d02012-10-03 16:56:58 -04002908 struct cfq_rb_root *st = cfqq->service_tree;
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002909
Vivek Goyal34b98d02012-10-03 16:56:58 -04002910 BUG_ON(!st);
2911 BUG_ON(!st->count);
Vivek Goyalf04a6422009-12-03 12:59:40 -05002912
Vivek Goyalb6508c12010-08-23 12:23:33 +02002913 if (!cfqd->cfq_slice_idle)
2914 return false;
2915
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002916 /* We never do for idle class queues. */
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002917 if (wl_class == IDLE_WORKLOAD)
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002918 return false;
2919
2920 /* We do for queues that were marked with idle window flag. */
Shaohua Li3c764b72009-12-04 13:12:06 +01002921 if (cfq_cfqq_idle_window(cfqq) &&
2922 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002923 return true;
2924
2925 /*
2926 * Otherwise, we do only if they are the last ones
2927 * in their service tree.
2928 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002929 if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2930 !cfq_io_thinktime_big(cfqd, &st->ttime, false))
Shaohua Lic1e44752010-11-08 15:01:02 +01002931 return true;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002932 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
Shaohua Lic1e44752010-11-08 15:01:02 +01002933 return false;
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002934}
2935
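
/*
 * Illustrative sketch condensing the decision above into a single
 * predicate; the boolean arguments are stand-ins for the cfqd/cfqq/
 * service-tree state that the real function tests.
 */
#if 0
#include <stdbool.h>

static bool should_idle(bool slice_idle_enabled, bool idle_class,
			bool idle_window, bool nonrot_with_queueing,
			bool last_sync_with_small_ttime)
{
	if (!slice_idle_enabled || idle_class)
		return false;
	if (idle_window && !nonrot_with_queueing)
		return true;
	return last_sync_with_small_ttime;
}
#endif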
Jens Axboe6d048f52007-04-25 12:44:27 +02002936static void cfq_arm_slice_timer(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002937{
Jens Axboe17926692007-01-19 11:59:30 +11002938 struct cfq_queue *cfqq = cfqd->active_queue;
Jan Karae7954212016-01-12 16:24:15 +01002939 struct cfq_rb_root *st = cfqq->service_tree;
Tejun Heoc5869802011-12-14 00:33:41 +01002940 struct cfq_io_cq *cic;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002941 u64 sl, group_idle = 0;
2942 u64 now = ktime_get_ns();
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002943
Jens Axboea68bbddba2008-09-24 13:03:33 +02002944 /*
Jens Axboef7d7b7a2008-09-25 11:37:50 +02002945 * SSD device without seek penalty, disable idling. But only do so
2946 * for devices that support queuing, otherwise we still have a problem
2947 * with sync vs async workloads.
Jens Axboea68bbddba2008-09-24 13:03:33 +02002948 */
Jens Axboef7d7b7a2008-09-25 11:37:50 +02002949 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
Jens Axboea68bbddba2008-09-24 13:03:33 +02002950 return;
2951
Jens Axboedd67d052006-06-21 09:36:18 +02002952 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
Jens Axboe6d048f52007-04-25 12:44:27 +02002953 WARN_ON(cfq_cfqq_slice_new(cfqq));
Jens Axboe22e2c502005-06-27 10:55:12 +02002954
2955 /*
2956 * idle is disabled, either manually or by past process history
2957 */
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002958 if (!cfq_should_idle(cfqd, cfqq)) {
2959 /* no queue idling. Check for group idling */
2960 if (cfqd->cfq_group_idle)
2961 group_idle = cfqd->cfq_group_idle;
2962 else
2963 return;
2964 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002965
Jens Axboe22e2c502005-06-27 10:55:12 +02002966 /*
Corrado Zoccolo8e550632009-11-26 10:02:58 +01002967 * still active requests from this queue, don't idle
Jens Axboe7b679132008-05-30 12:23:07 +02002968 */
Corrado Zoccolo8e550632009-11-26 10:02:58 +01002969 if (cfqq->dispatched)
Jens Axboe7b679132008-05-30 12:23:07 +02002970 return;
2971
2972 /*
Jens Axboe22e2c502005-06-27 10:55:12 +02002973 * task has exited, don't wait
2974 */
Jens Axboe206dc692006-03-28 13:03:44 +02002975 cic = cfqd->active_cic;
Tejun Heof6e8d012012-03-05 13:15:26 -08002976 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
Jens Axboe6d048f52007-04-25 12:44:27 +02002977 return;
2978
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002979 /*
2980 * If our average think time is larger than the remaining time
2981 * slice, then don't idle. This avoids overrunning the allotted
2982 * time slice.
2983 */
Shaohua Li383cd722011-07-12 14:24:35 +02002984 if (sample_valid(cic->ttime.ttime_samples) &&
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06002985 (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
2986 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
Shaohua Li383cd722011-07-12 14:24:35 +02002987 cic->ttime.ttime_mean);
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002988 return;
Divyesh Shahb1ffe732010-03-25 15:45:03 +01002989 }
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002990
Jan Karae7954212016-01-12 16:24:15 +01002991 /*
2992 * There are other queues in the group or this is the only group and
2993 * it has too big thinktime, don't do group idle.
2994 */
2995 if (group_idle &&
2996 (cfqq->cfqg->nr_cfqq > 1 ||
2997 cfq_io_thinktime_big(cfqd, &st->ttime, true)))
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002998 return;
2999
Jens Axboe3b181522005-06-27 10:56:24 +02003000 cfq_mark_cfqq_wait_request(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003001
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003002 if (group_idle)
3003 sl = cfqd->cfq_group_idle;
3004 else
3005 sl = cfqd->cfq_slice_idle;
Jens Axboe206dc692006-03-28 13:03:44 +02003006
Jan Kara91148322016-06-08 15:11:39 +02003007 hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
3008 HRTIMER_MODE_REL);
Tejun Heo155fead2012-04-01 14:38:44 -07003009 cfqg_stats_set_start_idle_time(cfqq->cfqg);
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003010 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003011 group_idle ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012}
3013
Jens Axboe498d3aa22007-04-26 12:54:48 +02003014/*
3015 * Move request from internal lists to the request queue dispatch list.
3016 */
Jens Axboe165125e2007-07-24 09:28:11 +02003017static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018{
Jens Axboe3ed9a292007-04-23 08:33:33 +02003019 struct cfq_data *cfqd = q->elevator->elevator_data;
Jens Axboe5e705372006-07-13 12:39:25 +02003020 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003021
Jens Axboe7b679132008-05-30 12:23:07 +02003022 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
3023
Jeff Moyer06d21882009-09-11 17:08:59 +02003024 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
Jens Axboe5380a102006-07-13 12:37:56 +02003025 cfq_remove_request(rq);
Jens Axboe6d048f52007-04-25 12:44:27 +02003026 cfqq->dispatched++;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003027 (RQ_CFQG(rq))->dispatched++;
Jens Axboe5380a102006-07-13 12:37:56 +02003028 elv_dispatch_sort(q, rq);
Jens Axboe3ed9a292007-04-23 08:33:33 +02003029
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003030 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
Vivek Goyalc4e78932010-08-23 12:25:03 +02003031 cfqq->nr_sectors += blk_rq_sectors(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032}
3033
3034/*
3035 * return expired entry, or NULL to just start from scratch in rbtree
3036 */
Jens Axboefebffd62008-01-28 13:19:43 +01003037static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038{
Jens Axboe30996f42009-10-05 11:03:39 +02003039 struct request *rq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040
Jens Axboe3b181522005-06-27 10:56:24 +02003041 if (cfq_cfqq_fifo_expire(cfqq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 return NULL;
Jens Axboecb887412007-01-19 12:01:16 +11003043
3044 cfq_mark_cfqq_fifo_expire(cfqq);
3045
Jens Axboe89850f72006-07-22 16:48:31 +02003046 if (list_empty(&cfqq->fifo))
3047 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048
Jens Axboe89850f72006-07-22 16:48:31 +02003049 rq = rq_entry_fifo(cfqq->fifo.next);
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003050 if (ktime_get_ns() < rq->fifo_time)
Jens Axboe7b679132008-05-30 12:23:07 +02003051 rq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
Jens Axboe6d048f52007-04-25 12:44:27 +02003053 return rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054}
3055
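
/*
 * Illustrative sketch (invented helper name): the fifo check boils
 * down to "has the oldest request's absolute deadline passed?". The
 * deadline is assumed to have been set at insertion as now plus the
 * fifo expiry for the request's direction.
 */
#if 0
#include <stdbool.h>

static bool fifo_request_expired(unsigned long long now_ns,
				 unsigned long long oldest_deadline_ns)
{
	return now_ns >= oldest_deadline_ns;
}
#endif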
Jens Axboe22e2c502005-06-27 10:55:12 +02003056static inline int
3057cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3058{
3059 const int base_rq = cfqd->cfq_slice_async_rq;
3060
3061 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
3062
Namhyung Kimb9f8ce02011-05-24 10:23:21 +02003063 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02003064}
3065
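
/*
 * Worked example for the formula above, assuming IOPRIO_BE_NR == 8
 * and base_rq == 2: the best priority (0) may queue up to 32
 * requests, the worst (7) only 4. Names are illustrative.
 */
#if 0
#define EX_IOPRIO_BE_NR	8

static int prio_to_maxrq(int base_rq, int ioprio)
{
	return 2 * base_rq * (EX_IOPRIO_BE_NR - ioprio);
}
/* prio_to_maxrq(2, 0) == 32, prio_to_maxrq(2, 7) == 4 */
#endif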
3066/*
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003067 * Must be called with the queue_lock held.
3068 */
3069static int cfqq_process_refs(struct cfq_queue *cfqq)
3070{
3071 int process_refs, io_refs;
3072
3073 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
Shaohua Li30d7b942011-01-07 08:46:59 +01003074 process_refs = cfqq->ref - io_refs;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003075 BUG_ON(process_refs < 0);
3076 return process_refs;
3077}
3078
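
/*
 * Illustrative sketch (plain integers stand in for the cfqq fields):
 * process references are whatever is left of the total refcount once
 * each allocated request's reference is taken out.
 */
#if 0
static int process_refs_of(int total_ref, int alloc_read, int alloc_write)
{
	int io_refs = alloc_read + alloc_write;

	return total_ref - io_refs;	/* a bug if this goes negative */
}
/* total_ref == 5 with 2 reads + 1 write allocated -> 2 process refs */
#endif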
3079static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3080{
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003081 int process_refs, new_process_refs;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003082 struct cfq_queue *__cfqq;
3083
Jeff Moyerc10b61f2010-06-17 10:19:11 -04003084 /*
3085 * If there are no process references on the new_cfqq, then it is
3086 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3087 * chain may have dropped their last reference (not just their
3088 * last process reference).
3089 */
3090 if (!cfqq_process_refs(new_cfqq))
3091 return;
3092
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003093 /* Avoid a circular list and skip interim queue merges */
3094 while ((__cfqq = new_cfqq->new_cfqq)) {
3095 if (__cfqq == cfqq)
3096 return;
3097 new_cfqq = __cfqq;
3098 }
3099
3100 process_refs = cfqq_process_refs(cfqq);
Jeff Moyerc10b61f2010-06-17 10:19:11 -04003101 new_process_refs = cfqq_process_refs(new_cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003102 /*
3103 * If the process for the cfqq has gone away, there is no
3104 * sense in merging the queues.
3105 */
Jeff Moyerc10b61f2010-06-17 10:19:11 -04003106 if (process_refs == 0 || new_process_refs == 0)
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003107 return;
3108
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003109 /*
3110 * Merge in the direction of the lesser amount of work.
3111 */
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003112 if (new_process_refs >= process_refs) {
3113 cfqq->new_cfqq = new_cfqq;
Shaohua Li30d7b942011-01-07 08:46:59 +01003114 new_cfqq->ref += process_refs;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003115 } else {
3116 new_cfqq->new_cfqq = cfqq;
Shaohua Li30d7b942011-01-07 08:46:59 +01003117 cfqq->ref += new_process_refs;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003118 }
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003119}
3120
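
/*
 * Illustrative sketch of the "merge in the direction of the lesser
 * amount of work" rule above: the queue with fewer process references
 * is redirected at the other, which absorbs those references.
 * "struct q" is an invented stand-in for cfq_queue.
 */
#if 0
struct q {
	struct q *new_q;
	int ref;
};

static void setup_merge(struct q *a, int a_refs, struct q *b, int b_refs)
{
	if (b_refs >= a_refs) {
		a->new_q = b;		/* a will be drained into b */
		b->ref += a_refs;
	} else {
		b->new_q = a;
		a->ref += b_refs;
	}
}
#endif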
Vivek Goyal6d816ec2012-10-03 16:56:59 -04003121static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04003122 struct cfq_group *cfqg, enum wl_class_t wl_class)
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003123{
3124 struct cfq_queue *queue;
3125 int i;
3126 bool key_valid = false;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003127 u64 lowest_key = 0;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003128 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3129
Vivek Goyal65b32a52009-12-16 17:52:59 -05003130 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3131 /* select the one with lowest rb_key */
Vivek Goyal34b98d02012-10-03 16:56:58 -04003132 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003133 if (queue &&
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003134 (!key_valid || queue->rb_key < lowest_key)) {
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003135 lowest_key = queue->rb_key;
3136 cur_best = i;
3137 key_valid = true;
3138 }
3139 }
3140
3141 return cur_best;
3142}
3143
Vivek Goyal6d816ec2012-10-03 16:56:59 -04003144static void
3145choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003146{
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003147 u64 slice;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003148 unsigned count;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003149 struct cfq_rb_root *st;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003150 u64 group_slice;
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003151 enum wl_class_t original_class = cfqd->serving_wl_class;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003152 u64 now = ktime_get_ns();
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003153
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003154 /* Choose next priority. RT > BE > IDLE */
Vivek Goyal58ff82f2009-12-03 12:59:44 -05003155 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003156 cfqd->serving_wl_class = RT_WORKLOAD;
Vivek Goyal58ff82f2009-12-03 12:59:44 -05003157 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003158 cfqd->serving_wl_class = BE_WORKLOAD;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003159 else {
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003160 cfqd->serving_wl_class = IDLE_WORKLOAD;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003161 cfqd->workload_expires = now + jiffies_to_nsecs(1);
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003162 return;
3163 }
3164
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003165 if (original_class != cfqd->serving_wl_class)
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01003166 goto new_workload;
3167
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003168 /*
3169	 * For RT and BE, we also have to choose the type
3170	 * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
3171	 * expiration time
3172 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04003173 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003174 count = st->count;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003175
3176 /*
Vivek Goyal65b32a52009-12-16 17:52:59 -05003177 * check workload expiration, and that we still have other queues ready
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003178 */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003179 if (count && !(now > cfqd->workload_expires))
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003180 return;
3181
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01003182new_workload:
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003183 /* otherwise select new workload type */
Vivek Goyal6d816ec2012-10-03 16:56:59 -04003184 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003185 cfqd->serving_wl_class);
Vivek Goyal34b98d02012-10-03 16:56:58 -04003186 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003187 count = st->count;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003188
3189 /*
3190 * the workload slice is computed as a fraction of target latency
3191 * proportional to the number of queues in that workload, over
3192 * all the queues in the same priority class
3193 */
Vivek Goyal58ff82f2009-12-03 12:59:44 -05003194 group_slice = cfq_group_slice(cfqd, cfqg);
3195
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003196 slice = div_u64(group_slice * count,
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003197 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3198 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003199 cfqg)));
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003200
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003201 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003202 u64 tmp;
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05003203
3204 /*
3205		 * Async queues are currently system wide. Just taking the
3206		 * proportion of queues within the same group will lead to a
3207		 * higher async ratio system wide, as the root group generally
3208		 * has a higher weight. A more accurate approach would be to
3209		 * calculate the system-wide async/sync ratio.
3210 */
Tao Ma5bf14c02012-04-01 14:33:39 -07003211 tmp = cfqd->cfq_target_latency *
3212 cfqg_busy_async_queues(cfqd, cfqg);
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003213 tmp = div_u64(tmp, cfqd->busy_queues);
3214 slice = min_t(u64, slice, tmp);
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05003215
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003216 /* async workload slice is scaled down according to
3217 * the sync/async slice ratio. */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003218 slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05003219 } else
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003220 /* sync workload slice is at least 2 * cfq_slice_idle */
3221 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3222
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003223 slice = max_t(u64, slice, CFQ_MIN_TT);
3224 cfq_log(cfqd, "workload slice:%llu", slice);
3225 cfqd->workload_expires = now + slice;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003226}
3227
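
/*
 * Worked example (simplified to the sync branch, with invented
 * numbers): a 300 ms group slice, 2 queues in the chosen workload and
 * an average of 6 busy queues in the class gives 300 * 2 / 6 = 100 ms,
 * floored at twice the idle window.
 */
#if 0
static unsigned long long sync_workload_slice(unsigned long long group_slice_ns,
					      unsigned int count,
					      unsigned int busy_avg,
					      unsigned long long slice_idle_ns)
{
	unsigned long long slice = group_slice_ns * count / busy_avg;
	unsigned long long floor = 2 * slice_idle_ns;

	return slice > floor ? slice : floor;
}
/* sync_workload_slice(300000000ULL, 2, 6, 8000000ULL) == 100000000ULL */
#endif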
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003228static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3229{
3230 struct cfq_rb_root *st = &cfqd->grp_service_tree;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05003231 struct cfq_group *cfqg;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003232
3233 if (RB_EMPTY_ROOT(&st->rb))
3234 return NULL;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05003235 cfqg = cfq_rb_first_group(st);
Vivek Goyal25bc6b02009-12-03 12:59:43 -05003236 update_min_vdisktime(st);
3237 return cfqg;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003238}
3239
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003240static void cfq_choose_cfqg(struct cfq_data *cfqd)
3241{
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003242 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003243 u64 now = ktime_get_ns();
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003244
3245 cfqd->serving_group = cfqg;
Vivek Goyaldae739e2009-12-03 12:59:45 -05003246
3247 /* Restore the workload type data */
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003248 if (cfqg->saved_wl_slice) {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003249 cfqd->workload_expires = now + cfqg->saved_wl_slice;
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04003250 cfqd->serving_wl_type = cfqg->saved_wl_type;
3251 cfqd->serving_wl_class = cfqg->saved_wl_class;
Gui Jianfeng66ae2912009-12-15 10:08:45 +01003252 } else
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003253 cfqd->workload_expires = now - 1;
Gui Jianfeng66ae2912009-12-15 10:08:45 +01003254
Vivek Goyal6d816ec2012-10-03 16:56:59 -04003255 choose_wl_class_and_type(cfqd, cfqg);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003256}
3257
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003258/*
Jens Axboe498d3aa22007-04-26 12:54:48 +02003259 * Select a queue for service. If we have a current active queue,
3260 * check whether to continue servicing it, or retrieve and set a new one.
Jens Axboe22e2c502005-06-27 10:55:12 +02003261 */
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003262static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02003263{
Jens Axboea36e71f2009-04-15 12:15:11 +02003264 struct cfq_queue *cfqq, *new_cfqq = NULL;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003265 u64 now = ktime_get_ns();
Jens Axboe22e2c502005-06-27 10:55:12 +02003266
3267 cfqq = cfqd->active_queue;
3268 if (!cfqq)
3269 goto new_queue;
3270
Vivek Goyalf04a6422009-12-03 12:59:40 -05003271 if (!cfqd->rq_queued)
3272 return NULL;
Vivek Goyalc244bb52009-12-08 17:52:57 -05003273
3274 /*
3275 * We were waiting for group to get backlogged. Expire the queue
3276 */
3277 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3278 goto expire;
3279
Jens Axboe22e2c502005-06-27 10:55:12 +02003280 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02003281 * The active queue has run out of time, expire it and select new.
Jens Axboe22e2c502005-06-27 10:55:12 +02003282 */
Vivek Goyal7667aa02009-12-08 17:52:58 -05003283 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3284 /*
3285 * If slice had not expired at the completion of last request
3286 * we might not have turned on wait_busy flag. Don't expire
3287 * the queue yet. Allow the group to get backlogged.
3288 *
3289	 * The very fact that we have used the slice means that we
3290 * have been idling all along on this queue and it should be
3291 * ok to wait for this request to complete.
3292 */
Vivek Goyal82bbbf22009-12-10 19:25:41 +01003293 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3294 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3295 cfqq = NULL;
Vivek Goyal7667aa02009-12-08 17:52:58 -05003296 goto keep_queue;
Vivek Goyal82bbbf22009-12-10 19:25:41 +01003297 } else
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003298 goto check_group_idle;
Vivek Goyal7667aa02009-12-08 17:52:58 -05003299 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003300
3301 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02003302 * The active queue has requests and isn't expired, allow it to
3303 * dispatch.
Jens Axboe22e2c502005-06-27 10:55:12 +02003304 */
Jens Axboedd67d052006-06-21 09:36:18 +02003305 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
Jens Axboe22e2c502005-06-27 10:55:12 +02003306 goto keep_queue;
Jens Axboe6d048f52007-04-25 12:44:27 +02003307
3308 /*
Jens Axboea36e71f2009-04-15 12:15:11 +02003309 * If another queue has a request waiting within our mean seek
3310 * distance, let it run. The expire code will check for close
3311 * cooperators and put the close queue at the front of the service
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003312 * tree. If possible, merge the expiring queue with the new cfqq.
Jens Axboea36e71f2009-04-15 12:15:11 +02003313 */
Jeff Moyerb3b6d042009-10-23 17:14:51 -04003314 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003315 if (new_cfqq) {
3316 if (!cfqq->new_cfqq)
3317 cfq_setup_merge(cfqq, new_cfqq);
Jens Axboea36e71f2009-04-15 12:15:11 +02003318 goto expire;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003319 }
Jens Axboea36e71f2009-04-15 12:15:11 +02003320
3321 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02003322 * No requests pending. If the active queue still has requests in
3323 * flight or is idling for a new request, allow either of these
3324 * conditions to happen (or time out) before selecting a new queue.
3325 */
Jan Kara91148322016-06-08 15:11:39 +02003326 if (hrtimer_active(&cfqd->idle_slice_timer)) {
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003327 cfqq = NULL;
3328 goto keep_queue;
3329 }
3330
Shaohua Li8e1ac662010-11-08 15:01:04 +01003331 /*
3332	 * This is a deep seek queue, but the device is much faster than
3333	 * the queue can deliver; don't idle.
3334	 */
3335 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3336 (cfq_cfqq_slice_new(cfqq) ||
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003337 (cfqq->slice_end - now > now - cfqq->slice_start))) {
Shaohua Li8e1ac662010-11-08 15:01:04 +01003338 cfq_clear_cfqq_deep(cfqq);
3339 cfq_clear_cfqq_idle_window(cfqq);
3340 }
3341
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003342 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3343 cfqq = NULL;
3344 goto keep_queue;
3345 }
3346
3347 /*
3348 * If group idle is enabled and there are requests dispatched from
3349 * this group, wait for requests to complete.
3350 */
3351check_group_idle:
Shaohua Li7700fc42011-07-12 14:24:56 +02003352 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3353 cfqq->cfqg->dispatched &&
3354 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
Jens Axboecaaa5f92006-06-16 11:23:00 +02003355 cfqq = NULL;
3356 goto keep_queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02003357 }
3358
Jens Axboe3b181522005-06-27 10:56:24 +02003359expire:
Vivek Goyale5ff0822010-04-26 19:25:11 +02003360 cfq_slice_expired(cfqd, 0);
Jens Axboe3b181522005-06-27 10:56:24 +02003361new_queue:
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003362 /*
3363 * Current queue expired. Check if we have to switch to a new
3364 * service tree
3365 */
3366 if (!new_cfqq)
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003367 cfq_choose_cfqg(cfqd);
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003368
Jens Axboea36e71f2009-04-15 12:15:11 +02003369 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003370keep_queue:
Jens Axboe3b181522005-06-27 10:56:24 +02003371 return cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02003372}
3373
Jens Axboefebffd62008-01-28 13:19:43 +01003374static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
Jens Axboed9e76202007-04-20 14:27:50 +02003375{
3376 int dispatched = 0;
3377
3378 while (cfqq->next_rq) {
3379 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3380 dispatched++;
3381 }
3382
3383 BUG_ON(!list_empty(&cfqq->fifo));
Vivek Goyalf04a6422009-12-03 12:59:40 -05003384
3385 /* By default cfqq is not expired if it is empty. Do it explicitly */
Vivek Goyale5ff0822010-04-26 19:25:11 +02003386 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
Jens Axboed9e76202007-04-20 14:27:50 +02003387 return dispatched;
3388}
3389
Jens Axboe498d3aa22007-04-26 12:54:48 +02003390/*
3391 * Drain our current requests. Used for barriers and when switching
3392 * io schedulers on-the-fly.
3393 */
Jens Axboed9e76202007-04-20 14:27:50 +02003394static int cfq_forced_dispatch(struct cfq_data *cfqd)
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003395{
Jens Axboe08717142008-01-28 11:38:15 +01003396 struct cfq_queue *cfqq;
Jens Axboed9e76202007-04-20 14:27:50 +02003397 int dispatched = 0;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003398
Divyesh Shah3440c492010-04-09 09:29:57 +02003399 /* Expire the timeslice of the current active queue first */
Vivek Goyale5ff0822010-04-26 19:25:11 +02003400 cfq_slice_expired(cfqd, 0);
Divyesh Shah3440c492010-04-09 09:29:57 +02003401 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3402 __cfq_set_active_queue(cfqd, cfqq);
Vivek Goyalf04a6422009-12-03 12:59:40 -05003403 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
Divyesh Shah3440c492010-04-09 09:29:57 +02003404 }
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003405
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003406 BUG_ON(cfqd->busy_queues);
3407
Jeff Moyer69237152009-06-12 15:29:30 +02003408 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003409 return dispatched;
3410}
3411
Shaohua Liabc3c742010-03-01 09:20:54 +01003412static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3413 struct cfq_queue *cfqq)
3414{
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003415 u64 now = ktime_get_ns();
3416
Shaohua Liabc3c742010-03-01 09:20:54 +01003417 /* the queue hasn't finished any request, can't estimate */
3418 if (cfq_cfqq_slice_new(cfqq))
Shaohua Lic1e44752010-11-08 15:01:02 +01003419 return true;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003420 if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
Shaohua Lic1e44752010-11-08 15:01:02 +01003421 return true;
Shaohua Liabc3c742010-03-01 09:20:54 +01003422
Shaohua Lic1e44752010-11-08 15:01:02 +01003423 return false;
Shaohua Liabc3c742010-03-01 09:20:54 +01003424}
3425
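
/*
 * Illustrative sketch (plain scalars stand in for the cfqq/cfqd
 * fields): the estimate charges one idle window per request still in
 * flight and asks whether that projection already crosses the end of
 * the slice.
 */
#if 0
#include <stdbool.h>

static bool slice_used_soon(unsigned long long now_ns,
			    unsigned long long slice_idle_ns,
			    unsigned int dispatched,
			    unsigned long long slice_end_ns)
{
	return now_ns + slice_idle_ns * dispatched > slice_end_ns;
}
/* now 90 ms, idle 8 ms, 2 in flight, end 100 ms: 106 > 100 -> true */
#endif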
Jens Axboe0b182d62009-10-06 20:49:37 +02003426static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe2f5cb732009-04-07 08:51:19 +02003427{
Jens Axboe2f5cb732009-04-07 08:51:19 +02003428 unsigned int max_dispatch;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
Glauber Costa3932a862016-09-22 20:59:59 -04003430 if (cfq_cfqq_must_dispatch(cfqq))
3431 return true;
3432
Jens Axboe2f5cb732009-04-07 08:51:19 +02003433 /*
Jens Axboe5ad531d2009-07-03 12:57:48 +02003434 * Drain async requests before we start sync IO
3435 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003436 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
Jens Axboe0b182d62009-10-06 20:49:37 +02003437 return false;
Jens Axboe5ad531d2009-07-03 12:57:48 +02003438
3439 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02003440 * If this is an async queue and we have sync IO in flight, let it wait
3441 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003442 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02003443 return false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02003444
Shaohua Liabc3c742010-03-01 09:20:54 +01003445 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003446 if (cfq_class_idle(cfqq))
3447 max_dispatch = 1;
3448
3449 /*
3450 * Does this cfqq already have too much IO in flight?
3451 */
3452 if (cfqq->dispatched >= max_dispatch) {
Shaohua Lief8a41d2011-03-07 09:26:29 +01003453 bool promote_sync = false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02003454 /*
3456		 * an idle-class queue must never have more than a single IO in flight
3456 */
Jens Axboe3ed9a292007-04-23 08:33:33 +02003457 if (cfq_class_idle(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02003458 return false;
Jens Axboe3ed9a292007-04-23 08:33:33 +02003459
Jens Axboe2f5cb732009-04-07 08:51:19 +02003460 /*
Li, Shaohuac4ade942011-03-23 08:30:34 +01003461		 * If there is only one sync queue,
3462		 * we can ignore the async queue here and give the sync
Shaohua Lief8a41d2011-03-07 09:26:29 +01003463		 * queue no dispatch limit. The reason is that a sync queue can
3464		 * preempt an async queue, so limiting the sync queue doesn't make
3465		 * sense. This is useful for the aiostress test.
3466 */
Li, Shaohuac4ade942011-03-23 08:30:34 +01003467 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3468 promote_sync = true;
Shaohua Lief8a41d2011-03-07 09:26:29 +01003469
3470 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02003471 * We have other queues, don't allow more IO from this one
3472 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01003473 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3474 !promote_sync)
Jens Axboe0b182d62009-10-06 20:49:37 +02003475 return false;
Jens Axboe9ede2092007-01-19 12:11:44 +11003476
Jens Axboe2f5cb732009-04-07 08:51:19 +02003477 /*
Shaohua Li474b18c2009-12-03 12:58:05 +01003478 * Sole queue user, no limit
Vivek Goyal365722b2009-10-03 15:21:27 +02003479 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01003480 if (cfqd->busy_queues == 1 || promote_sync)
Shaohua Liabc3c742010-03-01 09:20:54 +01003481 max_dispatch = -1;
3482 else
3483 /*
3484 * Normally we start throttling cfqq when cfq_quantum/2
3485 * requests have been dispatched. But we can drive
3486			 * deeper queue depths at the beginning of the slice,
3487			 * subject to the upper limit of cfq_quantum.
3488			 */
3489 max_dispatch = cfqd->cfq_quantum;
Jens Axboe8e296752009-10-03 16:26:03 +02003490 }
3491
3492 /*
3493	 * Async queues must wait a bit before being allowed to dispatch.
3494 * We also ramp up the dispatch depth gradually for async IO,
3495 * based on the last sync IO we serviced
3496 */
Jens Axboe963b72f2009-10-03 19:42:18 +02003497 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003498 u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
Jens Axboe8e296752009-10-03 16:26:03 +02003499 unsigned int depth;
Vivek Goyal365722b2009-10-03 15:21:27 +02003500
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003501 depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
Jens Axboee00c54c2009-10-04 20:36:19 +02003502 if (!depth && !cfqq->dispatched)
3503 depth = 1;
Jens Axboe8e296752009-10-03 16:26:03 +02003504 if (depth < max_dispatch)
3505 max_dispatch = depth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506 }
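	/*
	 * Worked example (annotation, not in the original source): with the
	 * default cfq_slice[1] (sync slice) of 100ms and the last delayed
	 * sync completion 250ms ago, depth = 250 / 100 = 2, so an async
	 * queue is capped at 2 requests in flight until more sync IO
	 * completes.
	 */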
3507
Jens Axboe0b182d62009-10-06 20:49:37 +02003508 /*
3509 * If we're below the current max, allow a dispatch
3510 */
3511 return cfqq->dispatched < max_dispatch;
3512}
3513
3514/*
3515 * Dispatch a request from cfqq, moving it to the request queue
3516 * dispatch list.
3517 */
3518static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3519{
3520 struct request *rq;
3521
3522 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3523
Glauber Costa3932a862016-09-22 20:59:59 -04003524 rq = cfq_check_fifo(cfqq);
3525 if (rq)
3526 cfq_mark_cfqq_must_dispatch(cfqq);
3527
Jens Axboe0b182d62009-10-06 20:49:37 +02003528 if (!cfq_may_dispatch(cfqd, cfqq))
3529 return false;
3530
3531 /*
3532 * follow expired path, else get first next available
3533 */
Jens Axboe0b182d62009-10-06 20:49:37 +02003534 if (!rq)
3535 rq = cfqq->next_rq;
Glauber Costa3932a862016-09-22 20:59:59 -04003536 else
3537 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
Jens Axboe0b182d62009-10-06 20:49:37 +02003538
3539 /*
3540 * insert request into driver dispatch list
3541 */
3542 cfq_dispatch_insert(cfqd->queue, rq);
3543
3544 if (!cfqd->active_cic) {
Tejun Heoc5869802011-12-14 00:33:41 +01003545 struct cfq_io_cq *cic = RQ_CIC(rq);
Jens Axboe0b182d62009-10-06 20:49:37 +02003546
Tejun Heoc5869802011-12-14 00:33:41 +01003547 atomic_long_inc(&cic->icq.ioc->refcount);
Jens Axboe0b182d62009-10-06 20:49:37 +02003548 cfqd->active_cic = cic;
3549 }
3550
3551 return true;
3552}
3553
3554/*
3555 * Find the cfqq that we need to service and move a request from that to the
3556 * dispatch list
3557 */
3558static int cfq_dispatch_requests(struct request_queue *q, int force)
3559{
3560 struct cfq_data *cfqd = q->elevator->elevator_data;
3561 struct cfq_queue *cfqq;
3562
3563 if (!cfqd->busy_queues)
3564 return 0;
3565
3566 if (unlikely(force))
3567 return cfq_forced_dispatch(cfqd);
3568
3569 cfqq = cfq_select_queue(cfqd);
3570 if (!cfqq)
Jens Axboe8e296752009-10-03 16:26:03 +02003571 return 0;
3572
Jens Axboe2f5cb732009-04-07 08:51:19 +02003573 /*
Jens Axboe0b182d62009-10-06 20:49:37 +02003574 * Dispatch a request from this cfqq, if it is allowed
Jens Axboe2f5cb732009-04-07 08:51:19 +02003575 */
Jens Axboe0b182d62009-10-06 20:49:37 +02003576 if (!cfq_dispatch_request(cfqd, cfqq))
3577 return 0;
3578
Jens Axboe2f5cb732009-04-07 08:51:19 +02003579 cfqq->slice_dispatch++;
Jens Axboeb0291952009-04-07 11:38:31 +02003580 cfq_clear_cfqq_must_dispatch(cfqq);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003581
3582 /*
3583 * expire an async queue immediately if it has used up its slice. idle
3584	 * queues always expire after 1 dispatch round.
3585 */
3586 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3587 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3588 cfq_class_idle(cfqq))) {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003589 cfqq->slice_end = ktime_get_ns() + 1;
Vivek Goyale5ff0822010-04-26 19:25:11 +02003590 cfq_slice_expired(cfqd, 0);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003591 }
3592
Shan Weib217a902009-09-01 10:06:42 +02003593 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
Jens Axboe2f5cb732009-04-07 08:51:19 +02003594 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595}
3596
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597/*
Jens Axboe5e705372006-07-13 12:39:25 +02003598 * task holds one reference to the queue, dropped when task exits. each rq
3599 * in-flight on this queue also holds a reference, dropped when rq is freed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 *
Vivek Goyalb1c35762009-12-03 12:59:47 -05003601 * Each cfq queue took a reference on the parent group. Drop it now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 * queue lock must be held here.
3603 */
3604static void cfq_put_queue(struct cfq_queue *cfqq)
3605{
Jens Axboe22e2c502005-06-27 10:55:12 +02003606 struct cfq_data *cfqd = cfqq->cfqd;
Justin TerAvest0bbfeb82011-03-01 15:05:08 -05003607 struct cfq_group *cfqg;
Jens Axboe22e2c502005-06-27 10:55:12 +02003608
Shaohua Li30d7b942011-01-07 08:46:59 +01003609 BUG_ON(cfqq->ref <= 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
Shaohua Li30d7b942011-01-07 08:46:59 +01003611 cfqq->ref--;
3612 if (cfqq->ref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613 return;
3614
Jens Axboe7b679132008-05-30 12:23:07 +02003615 cfq_log_cfqq(cfqd, cfqq, "put_queue");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 BUG_ON(rb_first(&cfqq->sort_list));
Jens Axboe22e2c502005-06-27 10:55:12 +02003617 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
Vivek Goyalb1c35762009-12-03 12:59:47 -05003618 cfqg = cfqq->cfqg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
Jens Axboe28f95cbc2007-01-19 12:09:53 +11003620 if (unlikely(cfqd->active_queue == cfqq)) {
Vivek Goyale5ff0822010-04-26 19:25:11 +02003621 __cfq_slice_expired(cfqd, cfqq, 0);
Jens Axboe23e018a2009-10-05 08:52:35 +02003622 cfq_schedule_dispatch(cfqd);
Jens Axboe28f95cbc2007-01-19 12:09:53 +11003623 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003624
Vivek Goyalf04a6422009-12-03 12:59:40 -05003625 BUG_ON(cfq_cfqq_on_rr(cfqq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626 kmem_cache_free(cfq_pool, cfqq);
Tejun Heoeb7d8c072012-03-23 14:02:53 +01003627 cfqg_put(cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628}
3629
Shaohua Lid02a2c02010-05-25 10:16:53 +02003630static void cfq_put_cooperator(struct cfq_queue *cfqq)
Jens Axboe89850f72006-07-22 16:48:31 +02003631{
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003632 struct cfq_queue *__cfqq, *next;
3633
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003634 /*
3635 * If this queue was scheduled to merge with another queue, be
3636 * sure to drop the reference taken on that queue (and others in
3637 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3638 */
3639 __cfqq = cfqq->new_cfqq;
3640 while (__cfqq) {
3641 if (__cfqq == cfqq) {
3642 WARN(1, "cfqq->new_cfqq loop detected\n");
3643 break;
3644 }
3645 next = __cfqq->new_cfqq;
3646 cfq_put_queue(__cfqq);
3647 __cfqq = next;
3648 }
Shaohua Lid02a2c02010-05-25 10:16:53 +02003649}
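/*
 * Annotation (not in the original source): new_cfqq pointers form a merge
 * chain, e.g. A->new_cfqq == B and B->new_cfqq == C; tearing down A above
 * drops one reference on B and one on C. The loop guard catches an
 * unexpected cycle back to the starting queue (A->B->A) instead of
 * spinning forever.
 */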
3650
3651static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3652{
3653 if (unlikely(cfqq == cfqd->active_queue)) {
3654 __cfq_slice_expired(cfqd, cfqq, 0);
3655 cfq_schedule_dispatch(cfqd);
3656 }
3657
3658 cfq_put_cooperator(cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003659
Jens Axboe89850f72006-07-22 16:48:31 +02003660 cfq_put_queue(cfqq);
3661}
3662
Tejun Heo9b84cac2011-12-14 00:33:42 +01003663static void cfq_init_icq(struct io_cq *icq)
3664{
3665 struct cfq_io_cq *cic = icq_to_cic(icq);
3666
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003667 cic->ttime.last_end_request = ktime_get_ns();
Tejun Heo9b84cac2011-12-14 00:33:42 +01003668}
3669
Tejun Heoc5869802011-12-14 00:33:41 +01003670static void cfq_exit_icq(struct io_cq *icq)
Jens Axboe89850f72006-07-22 16:48:31 +02003671{
Tejun Heoc5869802011-12-14 00:33:41 +01003672 struct cfq_io_cq *cic = icq_to_cic(icq);
Tejun Heo283287a2011-12-14 00:33:38 +01003673 struct cfq_data *cfqd = cic_to_cfqd(cic);
Fabio Checconi4faa3c82008-04-10 08:28:01 +02003674
Tejun Heo563180a2015-08-18 14:55:00 -07003675 if (cic_to_cfqq(cic, false)) {
3676 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3677 cic_set_cfqq(cic, NULL, false);
Jens Axboe89850f72006-07-22 16:48:31 +02003678 }
3679
Tejun Heo563180a2015-08-18 14:55:00 -07003680 if (cic_to_cfqq(cic, true)) {
3681 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3682 cic_set_cfqq(cic, NULL, true);
Jens Axboe89850f72006-07-22 16:48:31 +02003683 }
Jens Axboe89850f72006-07-22 16:48:31 +02003684}
3685
Tejun Heoabede6d2012-03-19 15:10:57 -07003686static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02003687{
3688 struct task_struct *tsk = current;
3689 int ioprio_class;
3690
Jens Axboe3b181522005-06-27 10:56:24 +02003691 if (!cfq_cfqq_prio_changed(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02003692 return;
3693
Tejun Heo598971b2012-03-19 15:10:58 -07003694 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02003695 switch (ioprio_class) {
Jens Axboefe094d92008-01-31 13:08:54 +01003696 default:
3697 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
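		/* fall through to IOPRIO_CLASS_NONE */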
3698 case IOPRIO_CLASS_NONE:
3699 /*
Jens Axboe6d63c272008-05-07 09:51:23 +02003700 * no prio set, inherit CPU scheduling settings
Jens Axboefe094d92008-01-31 13:08:54 +01003701 */
3702 cfqq->ioprio = task_nice_ioprio(tsk);
Jens Axboe6d63c272008-05-07 09:51:23 +02003703 cfqq->ioprio_class = task_nice_ioclass(tsk);
Jens Axboefe094d92008-01-31 13:08:54 +01003704 break;
3705 case IOPRIO_CLASS_RT:
Tejun Heo598971b2012-03-19 15:10:58 -07003706 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003707 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3708 break;
3709 case IOPRIO_CLASS_BE:
Tejun Heo598971b2012-03-19 15:10:58 -07003710 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003711 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3712 break;
3713 case IOPRIO_CLASS_IDLE:
3714 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3715 cfqq->ioprio = 7;
3716 cfq_clear_cfqq_idle_window(cfqq);
3717 break;
Jens Axboe22e2c502005-06-27 10:55:12 +02003718 }
3719
3720 /*
3721 * keep track of original prio settings in case we have to temporarily
3722 * elevate the priority of this queue
3723 */
3724 cfqq->org_ioprio = cfqq->ioprio;
Jens Axboeb8269db2016-06-09 15:47:29 -06003725 cfqq->org_ioprio_class = cfqq->ioprio_class;
Jens Axboe3b181522005-06-27 10:56:24 +02003726 cfq_clear_cfqq_prio_changed(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003727}
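/*
 * Example (annotation, not in the original source): a task that called
 * ioprio_set() with IOPRIO_CLASS_BE and data 4 ends up with
 * cfqq->ioprio_class = IOPRIO_CLASS_BE and cfqq->ioprio = 4; a task with
 * no ioprio set inherits both from its nice level via task_nice_ioprio()
 * and task_nice_ioclass().
 */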
3728
Tejun Heo598971b2012-03-19 15:10:58 -07003729static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
Jens Axboe22e2c502005-06-27 10:55:12 +02003730{
Tejun Heo598971b2012-03-19 15:10:58 -07003731 int ioprio = cic->icq.ioc->ioprio;
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003732 struct cfq_data *cfqd = cic_to_cfqd(cic);
Al Viro478a82b2006-03-18 13:25:24 -05003733 struct cfq_queue *cfqq;
Jens Axboe35e60772006-06-14 09:10:45 +02003734
Tejun Heo598971b2012-03-19 15:10:58 -07003735 /*
3736 * Check whether ioprio has changed. The condition may trigger
3737 * spuriously on a newly created cic but there's no harm.
3738 */
3739 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
Jens Axboecaaa5f92006-06-16 11:23:00 +02003740 return;
3741
Tejun Heo563180a2015-08-18 14:55:00 -07003742 cfqq = cic_to_cfqq(cic, false);
Jens Axboecaaa5f92006-06-16 11:23:00 +02003743 if (cfqq) {
Tejun Heo563180a2015-08-18 14:55:00 -07003744 cfq_put_queue(cfqq);
Tejun Heo2da8de02015-08-18 14:55:02 -07003745 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
Tejun Heo563180a2015-08-18 14:55:00 -07003746 cic_set_cfqq(cic, cfqq, false);
Jens Axboe22e2c502005-06-27 10:55:12 +02003747 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02003748
Tejun Heo563180a2015-08-18 14:55:00 -07003749 cfqq = cic_to_cfqq(cic, true);
Jens Axboecaaa5f92006-06-16 11:23:00 +02003750 if (cfqq)
3751 cfq_mark_cfqq_prio_changed(cfqq);
Tejun Heo598971b2012-03-19 15:10:58 -07003752
3753 cic->ioprio = ioprio;
Jens Axboe22e2c502005-06-27 10:55:12 +02003754}
3755
Jens Axboed5036d72009-06-26 10:44:34 +02003756static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboea6151c32009-10-07 20:02:57 +02003757 pid_t pid, bool is_sync)
Jens Axboed5036d72009-06-26 10:44:34 +02003758{
3759 RB_CLEAR_NODE(&cfqq->rb_node);
3760 RB_CLEAR_NODE(&cfqq->p_node);
3761 INIT_LIST_HEAD(&cfqq->fifo);
3762
Shaohua Li30d7b942011-01-07 08:46:59 +01003763 cfqq->ref = 0;
Jens Axboed5036d72009-06-26 10:44:34 +02003764 cfqq->cfqd = cfqd;
3765
3766 cfq_mark_cfqq_prio_changed(cfqq);
3767
3768 if (is_sync) {
3769 if (!cfq_class_idle(cfqq))
3770 cfq_mark_cfqq_idle_window(cfqq);
3771 cfq_mark_cfqq_sync(cfqq);
3772 }
3773 cfqq->pid = pid;
3774}
3775
Vivek Goyal246103332009-12-03 12:59:51 -05003776#ifdef CONFIG_CFQ_GROUP_IOSCHED
Jan Kara142bbdf2017-04-04 14:31:30 +02003777static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
Vivek Goyal246103332009-12-03 12:59:51 -05003778{
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003779 struct cfq_data *cfqd = cic_to_cfqd(cic);
Tejun Heo60a83702015-08-18 14:55:05 -07003780 struct cfq_queue *cfqq;
Tejun Heof4da8072014-09-08 08:15:20 +09003781 uint64_t serial_nr;
Vivek Goyal246103332009-12-03 12:59:51 -05003782
Tejun Heo598971b2012-03-19 15:10:58 -07003783 rcu_read_lock();
Tejun Heof4da8072014-09-08 08:15:20 +09003784 serial_nr = bio_blkcg(bio)->css.serial_nr;
Tejun Heo598971b2012-03-19 15:10:58 -07003785 rcu_read_unlock();
3786
3787 /*
3788 * Check whether blkcg has changed. The condition may trigger
3789 * spuriously on a newly created cic but there's no harm.
3790 */
Tejun Heof4da8072014-09-08 08:15:20 +09003791 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
Jan Kara142bbdf2017-04-04 14:31:30 +02003792 return;
Jens Axboe87760e52016-11-09 12:38:14 -07003793
3794 /*
Tejun Heo60a83702015-08-18 14:55:05 -07003795 * Drop reference to queues. New queues will be assigned in new
3796 * group upon arrival of fresh requests.
3797 */
3798 cfqq = cic_to_cfqq(cic, false);
3799 if (cfqq) {
3800 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3801 cic_set_cfqq(cic, NULL, false);
3802 cfq_put_queue(cfqq);
3803 }
3804
3805 cfqq = cic_to_cfqq(cic, true);
3806 if (cfqq) {
3807 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3808 cic_set_cfqq(cic, NULL, true);
3809 cfq_put_queue(cfqq);
Vivek Goyal246103332009-12-03 12:59:51 -05003810 }
Tejun Heo598971b2012-03-19 15:10:58 -07003811
Tejun Heof4da8072014-09-08 08:15:20 +09003812 cic->blkcg_serial_nr = serial_nr;
Vivek Goyal246103332009-12-03 12:59:51 -05003813}
Tejun Heo598971b2012-03-19 15:10:58 -07003814#else
Jan Kara142bbdf2017-04-04 14:31:30 +02003815static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
Jens Axboe5d7f5ce2017-02-16 07:57:33 -07003816{
Jens Axboe5d7f5ce2017-02-16 07:57:33 -07003817}
Vivek Goyal246103332009-12-03 12:59:51 -05003818#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3819
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003820static struct cfq_queue **
Tejun Heo60a83702015-08-18 14:55:05 -07003821cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003822{
Jens Axboefe094d92008-01-31 13:08:54 +01003823 switch (ioprio_class) {
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003824 case IOPRIO_CLASS_RT:
Tejun Heo60a83702015-08-18 14:55:05 -07003825 return &cfqg->async_cfqq[0][ioprio];
Tejun Heo598971b2012-03-19 15:10:58 -07003826 case IOPRIO_CLASS_NONE:
3827 ioprio = IOPRIO_NORM;
3828 /* fall through */
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003829 case IOPRIO_CLASS_BE:
Tejun Heo60a83702015-08-18 14:55:05 -07003830 return &cfqg->async_cfqq[1][ioprio];
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003831 case IOPRIO_CLASS_IDLE:
Tejun Heo60a83702015-08-18 14:55:05 -07003832 return &cfqg->async_idle_cfqq;
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003833 default:
3834 BUG();
3835 }
3836}
3837
Jens Axboe15c31be2007-07-10 13:43:25 +02003838static struct cfq_queue *
Tejun Heoabede6d2012-03-19 15:10:57 -07003839cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
Tejun Heo2da8de02015-08-18 14:55:02 -07003840 struct bio *bio)
Jens Axboe15c31be2007-07-10 13:43:25 +02003841{
Jeff Moyerc6ce1942015-01-12 15:21:01 -05003842 int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3843 int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Tejun Heod4aad7f2015-08-18 14:55:04 -07003844 struct cfq_queue **async_cfqq = NULL;
Tejun Heo4ebc1c62015-08-18 14:54:57 -07003845 struct cfq_queue *cfqq;
Tejun Heo322731e2015-08-18 14:55:03 -07003846 struct cfq_group *cfqg;
3847
3848 rcu_read_lock();
Tejun Heoae118892015-08-18 14:55:20 -07003849 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
Tejun Heo322731e2015-08-18 14:55:03 -07003850 if (!cfqg) {
3851 cfqq = &cfqd->oom_cfqq;
3852 goto out;
3853 }
Jens Axboe15c31be2007-07-10 13:43:25 +02003854
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003855 if (!is_sync) {
Jeff Moyerc6ce1942015-01-12 15:21:01 -05003856 if (!ioprio_valid(cic->ioprio)) {
3857 struct task_struct *tsk = current;
3858 ioprio = task_nice_ioprio(tsk);
3859 ioprio_class = task_nice_ioclass(tsk);
3860 }
Tejun Heo60a83702015-08-18 14:55:05 -07003861 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003862 cfqq = *async_cfqq;
Tejun Heo4ebc1c62015-08-18 14:54:57 -07003863 if (cfqq)
3864 goto out;
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003865 }
3866
Tejun Heoe00f4f42016-11-21 18:03:32 -05003867 cfqq = kmem_cache_alloc_node(cfq_pool,
3868 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
Tejun Heod4aad7f2015-08-18 14:55:04 -07003869 cfqd->queue->node);
3870 if (!cfqq) {
3871 cfqq = &cfqd->oom_cfqq;
3872 goto out;
3873 }
Jens Axboe15c31be2007-07-10 13:43:25 +02003874
Alexander Potapenko4d608ba2017-01-23 15:06:43 +01003875 /* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
3876 cfqq->ioprio_class = IOPRIO_CLASS_NONE;
Tejun Heod4aad7f2015-08-18 14:55:04 -07003877 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3878 cfq_init_prio_data(cfqq, cic);
3879 cfq_link_cfqq_cfqg(cfqq, cfqg);
3880 cfq_log_cfqq(cfqd, cfqq, "alloced");
3881
3882 if (async_cfqq) {
3883 /* a new async queue is created, pin and remember */
Shaohua Li30d7b942011-01-07 08:46:59 +01003884 cfqq->ref++;
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003885 *async_cfqq = cfqq;
Jens Axboe15c31be2007-07-10 13:43:25 +02003886 }
Tejun Heo4ebc1c62015-08-18 14:54:57 -07003887out:
Shaohua Li30d7b942011-01-07 08:46:59 +01003888 cfqq->ref++;
Tejun Heo322731e2015-08-18 14:55:03 -07003889 rcu_read_unlock();
Jens Axboe15c31be2007-07-10 13:43:25 +02003890 return cfqq;
3891}
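/*
 * Annotation (not in the original source): every failure path in
 * cfq_get_queue() falls back to the embedded, statically allocated
 * oom_cfqq rather than returning NULL, so callers always get a usable
 * queue even when kmem_cache_alloc_node() fails under memory pressure.
 */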
3892
Jens Axboe22e2c502005-06-27 10:55:12 +02003893static void
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003894__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
Jens Axboe22e2c502005-06-27 10:55:12 +02003895{
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003896 u64 elapsed = ktime_get_ns() - ttime->last_end_request;
Shaohua Li383cd722011-07-12 14:24:35 +02003897 elapsed = min(elapsed, 2UL * slice_idle);
Jens Axboe22e2c502005-06-27 10:55:12 +02003898
Shaohua Li383cd722011-07-12 14:24:35 +02003899 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06003900 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
3901 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3902 ttime->ttime_samples);
Shaohua Li383cd722011-07-12 14:24:35 +02003903}
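/*
 * Numeric sketch (annotation, not in the original source): both fields
 * decay with weight 7/8 per sample. If every observed think time is a
 * steady 2ms, ttime_samples converges to 256 and ttime_total to
 * 256 * 2ms, so ttime_mean settles at roughly 2ms.
 */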
3904
3905static void
3906cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Tejun Heoc5869802011-12-14 00:33:41 +01003907 struct cfq_io_cq *cic)
Shaohua Li383cd722011-07-12 14:24:35 +02003908{
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02003909 if (cfq_cfqq_sync(cfqq)) {
Shaohua Li383cd722011-07-12 14:24:35 +02003910 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02003911 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3912 cfqd->cfq_slice_idle);
3913 }
Shaohua Li7700fc42011-07-12 14:24:56 +02003914#ifdef CONFIG_CFQ_GROUP_IOSCHED
3915 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3916#endif
Jens Axboe22e2c502005-06-27 10:55:12 +02003917}
3918
Jens Axboe206dc692006-03-28 13:03:44 +02003919static void
Jeff Moyerb2c18e12009-10-23 17:14:49 -04003920cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboe6d048f52007-04-25 12:44:27 +02003921 struct request *rq)
Jens Axboe206dc692006-03-28 13:03:44 +02003922{
Corrado Zoccolo3dde36d2010-02-27 19:45:39 +01003923 sector_t sdist = 0;
Corrado Zoccolo41647e72010-02-27 19:45:40 +01003924 sector_t n_sec = blk_rq_sectors(rq);
Corrado Zoccolo3dde36d2010-02-27 19:45:39 +01003925 if (cfqq->last_request_pos) {
3926 if (cfqq->last_request_pos < blk_rq_pos(rq))
3927 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3928 else
3929 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3930 }
Jens Axboe206dc692006-03-28 13:03:44 +02003931
Corrado Zoccolo3dde36d2010-02-27 19:45:39 +01003932 cfqq->seek_history <<= 1;
Corrado Zoccolo41647e72010-02-27 19:45:40 +01003933 if (blk_queue_nonrot(cfqd->queue))
3934 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3935 else
3936 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
Jens Axboe206dc692006-03-28 13:03:44 +02003937}
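/*
 * Annotation (not in the original source): seek_history acts as a 32-bit
 * shift register; each request shifts in one bit - 1 for a "seeky"
 * request (seek distance above CFQQ_SEEK_THR, or a small request on
 * non-rotational media), 0 otherwise. CFQQ_SEEKY() then classifies the
 * queue from the population count of this history.
 */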
Jens Axboe22e2c502005-06-27 10:55:12 +02003938
Christoph Hellwiga2b80962016-11-01 07:40:09 -06003939static inline bool req_noidle(struct request *req)
3940{
3941 return req_op(req) == REQ_OP_WRITE &&
3942 (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
3943}
3944
Jens Axboe22e2c502005-06-27 10:55:12 +02003945/*
3946 * Disable idle window if the process thinks too long or seeks so much that
3947 * it doesn't matter
3948 */
3949static void
3950cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Tejun Heoc5869802011-12-14 00:33:41 +01003951 struct cfq_io_cq *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02003952{
Jens Axboe7b679132008-05-30 12:23:07 +02003953 int old_idle, enable_idle;
Jens Axboe1be92f2f2007-04-19 14:32:26 +02003954
Jens Axboe08717142008-01-28 11:38:15 +01003955 /*
3956 * Don't idle for async or idle io prio class
3957 */
3958 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
Jens Axboe1be92f2f2007-04-19 14:32:26 +02003959 return;
3960
Jens Axboec265a7f2008-06-26 13:49:33 +02003961 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003962
Corrado Zoccolo76280af2009-11-26 10:02:58 +01003963 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3964 cfq_mark_cfqq_deep(cfqq);
3965
Christoph Hellwiga2b80962016-11-01 07:40:09 -06003966 if (cfqq->next_rq && req_noidle(cfqq->next_rq))
Corrado Zoccolo749ef9f2010-09-20 15:24:50 +02003967 enable_idle = 0;
Tejun Heof6e8d012012-03-05 13:15:26 -08003968 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
Tejun Heoc5869802011-12-14 00:33:41 +01003969 !cfqd->cfq_slice_idle ||
3970 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
Jens Axboe22e2c502005-06-27 10:55:12 +02003971 enable_idle = 0;
Shaohua Li383cd722011-07-12 14:24:35 +02003972 else if (sample_valid(cic->ttime.ttime_samples)) {
3973 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
Jens Axboe22e2c502005-06-27 10:55:12 +02003974 enable_idle = 0;
3975 else
3976 enable_idle = 1;
3977 }
3978
Jens Axboe7b679132008-05-30 12:23:07 +02003979 if (old_idle != enable_idle) {
3980 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3981 if (enable_idle)
3982 cfq_mark_cfqq_idle_window(cfqq);
3983 else
3984 cfq_clear_cfqq_idle_window(cfqq);
3985 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003986}
3987
Jens Axboe22e2c502005-06-27 10:55:12 +02003988/*
3989 * Check if new_cfqq should preempt the currently active queue. Return false
3990 * for no or if we aren't sure; true will cause a preempt.
3991 */
Jens Axboea6151c32009-10-07 20:02:57 +02003992static bool
Jens Axboe22e2c502005-06-27 10:55:12 +02003993cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
Jens Axboe5e705372006-07-13 12:39:25 +02003994 struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02003995{
Jens Axboe6d048f52007-04-25 12:44:27 +02003996 struct cfq_queue *cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02003997
Jens Axboe6d048f52007-04-25 12:44:27 +02003998 cfqq = cfqd->active_queue;
3999 if (!cfqq)
Jens Axboea6151c32009-10-07 20:02:57 +02004000 return false;
Jens Axboe22e2c502005-06-27 10:55:12 +02004001
Jens Axboe6d048f52007-04-25 12:44:27 +02004002 if (cfq_class_idle(new_cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02004003 return false;
Jens Axboe22e2c502005-06-27 10:55:12 +02004004
4005 if (cfq_class_idle(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02004006 return true;
Jens Axboe1e3335d2007-02-14 19:59:49 +01004007
Jens Axboe22e2c502005-06-27 10:55:12 +02004008 /*
Divyesh Shah875feb62010-01-06 18:58:20 -08004009 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
4010 */
4011 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
4012 return false;
4013
4014 /*
Jens Axboe374f84a2006-07-23 01:42:19 +02004015 * if the new request is sync, but the currently running queue is
4016 * not, let the sync request have priority.
4017 */
Glauber Costa3932a862016-09-22 20:59:59 -04004018 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02004019 return true;
Jens Axboe1e3335d2007-02-14 19:59:49 +01004020
Jan Kara3984aa52016-01-12 16:24:19 +01004021 /*
4022 * Treat ancestors of current cgroup the same way as current cgroup.
4023 * For anybody else we disallow preemption to guarantee service
4024 * fairness among cgroups.
4025 */
4026 if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
Vivek Goyal8682e1f2009-12-03 12:59:50 -05004027 return false;
4028
4029 if (cfq_slice_used(cfqq))
4030 return true;
4031
Jan Kara6c80731c2016-01-12 16:24:16 +01004032 /*
4033	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
4034 */
4035 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
4036 return true;
4037
4038 WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
Vivek Goyal8682e1f2009-12-03 12:59:50 -05004039 /* Allow preemption only if we are idling on sync-noidle tree */
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04004040 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
Vivek Goyal8682e1f2009-12-03 12:59:50 -05004041 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
Vivek Goyal8682e1f2009-12-03 12:59:50 -05004042 RB_EMPTY_ROOT(&cfqq->sort_list))
4043 return true;
4044
Jens Axboe374f84a2006-07-23 01:42:19 +02004045 /*
Jens Axboeb53d1ed2011-08-19 08:34:48 +02004046 * So both queues are sync. Let the new request get disk time if
4047 * it's a metadata request and the current queue is doing regular IO.
4048 */
Christoph Hellwig65299a32011-08-23 14:50:29 +02004049 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
Jens Axboeb53d1ed2011-08-19 08:34:48 +02004050 return true;
4051
Shaohua Lid2d59e12010-11-08 15:01:03 +01004052 /* An idle queue should not be idle now for some reason */
4053 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4054 return true;
4055
Jens Axboe1e3335d2007-02-14 19:59:49 +01004056 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02004057 return false;
Jens Axboe1e3335d2007-02-14 19:59:49 +01004058
4059 /*
4060 * if this request is as-good as one we would expect from the
4061 * current cfqq, let it preempt
4062 */
Shaohua Lie9ce3352010-03-19 08:03:04 +01004063 if (cfq_rq_close(cfqd, cfqq, rq))
Jens Axboea6151c32009-10-07 20:02:57 +02004064 return true;
Jens Axboe1e3335d2007-02-14 19:59:49 +01004065
Jens Axboea6151c32009-10-07 20:02:57 +02004066 return false;
Jens Axboe22e2c502005-06-27 10:55:12 +02004067}
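/*
 * Annotation (not in the original source): the checks in
 * cfq_should_preempt() are ordered from cheap class rules (idle never
 * preempts, RT preempts non-RT) through cgroup fairness, down to the more
 * speculative heuristics (metadata REQ_PRIO requests, queues that should
 * not be idling, and request locality via cfq_rq_close()).
 */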
4068
4069/*
4070 * cfqq preempts the active queue. if we allowed preempt with no slice left,
4071 * let it have half of its nominal slice.
4072 */
4073static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4074{
Shaohua Lidf0793a2012-01-19 09:20:09 +01004075 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4076
Jens Axboe7b679132008-05-30 12:23:07 +02004077 cfq_log_cfqq(cfqd, cfqq, "preempt");
Shaohua Lidf0793a2012-01-19 09:20:09 +01004078 cfq_slice_expired(cfqd, 1);
Jens Axboe22e2c502005-06-27 10:55:12 +02004079
Jens Axboebf572252006-07-19 20:29:12 +02004080 /*
Shaohua Lif8ae6e32011-01-14 08:41:02 +01004081	 * if the workload type changed, don't save the slice; otherwise the
4082	 * preempt doesn't happen
4083 */
Shaohua Lidf0793a2012-01-19 09:20:09 +01004084 if (old_type != cfqq_type(cfqq))
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04004085 cfqq->cfqg->saved_wl_slice = 0;
Shaohua Lif8ae6e32011-01-14 08:41:02 +01004086
4087 /*
Jens Axboebf572252006-07-19 20:29:12 +02004088	 * Put the new queue at the front of the current list,
4089 * so we know that it will be selected next.
4090 */
4091 BUG_ON(!cfq_cfqq_on_rr(cfqq));
Jens Axboeedd75ff2007-04-19 12:03:34 +02004092
4093 cfq_service_tree_add(cfqd, cfqq, 1);
Justin TerAvesteda5e0c2011-03-22 21:26:49 +01004094
Justin TerAvest62a37f62011-03-23 08:25:44 +01004095 cfqq->slice_end = 0;
4096 cfq_mark_cfqq_slice_new(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02004097}
4098
4099/*
Jens Axboe5e705372006-07-13 12:39:25 +02004100 * Called when a new fs request (rq) is added (to cfqq). Check if there's
Jens Axboe22e2c502005-06-27 10:55:12 +02004101 * something we should do about it
4102 */
4103static void
Jens Axboe5e705372006-07-13 12:39:25 +02004104cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4105 struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02004106{
Tejun Heoc5869802011-12-14 00:33:41 +01004107 struct cfq_io_cq *cic = RQ_CIC(rq);
Jens Axboe12e9fdd2006-06-01 10:09:56 +02004108
Aaron Carroll45333d52008-08-26 15:52:36 +02004109 cfqd->rq_queued++;
Christoph Hellwig65299a32011-08-23 14:50:29 +02004110 if (rq->cmd_flags & REQ_PRIO)
4111 cfqq->prio_pending++;
Jens Axboe374f84a2006-07-23 01:42:19 +02004112
Shaohua Li383cd722011-07-12 14:24:35 +02004113 cfq_update_io_thinktime(cfqd, cfqq, cic);
Jeff Moyerb2c18e12009-10-23 17:14:49 -04004114 cfq_update_io_seektime(cfqd, cfqq, rq);
Jens Axboe9c2c38a2005-08-24 14:57:54 +02004115 cfq_update_idle_window(cfqd, cfqq, cic);
4116
Jeff Moyerb2c18e12009-10-23 17:14:49 -04004117 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02004118
4119 if (cfqq == cfqd->active_queue) {
4120 /*
Jens Axboeb0291952009-04-07 11:38:31 +02004121 * Remember that we saw a request from this process, but
4122 * don't start queuing just yet. Otherwise we risk seeing lots
4123 * of tiny requests, because we disrupt the normal plugging
Jens Axboed6ceb252009-04-14 14:18:16 +02004124 * and merging. If the request is already larger than a single
4125 * page, let it rip immediately. For that case we assume that
Jens Axboe2d870722009-04-15 12:12:46 +02004126 * merging is already done. Ditto for a busy system that
4127 * has other work pending, don't risk delaying until the
4128 * idle timer unplug to continue working.
Jens Axboe22e2c502005-06-27 10:55:12 +02004129 */
Jens Axboed6ceb252009-04-14 14:18:16 +02004130 if (cfq_cfqq_wait_request(cfqq)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004131 if (blk_rq_bytes(rq) > PAGE_SIZE ||
Jens Axboe2d870722009-04-15 12:12:46 +02004132 cfqd->busy_queues > 1) {
Divyesh Shah812df482010-04-08 21:15:35 -07004133 cfq_del_timer(cfqd, cfqq);
Gui Jianfeng554554f2009-12-10 09:38:39 +01004134 cfq_clear_cfqq_wait_request(cfqq);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02004135 __blk_run_queue(cfqd->queue);
Divyesh Shaha11cdaa2010-04-13 19:59:17 +02004136 } else {
Tejun Heo155fead2012-04-01 14:38:44 -07004137 cfqg_stats_update_idle_time(cfqq->cfqg);
Vivek Goyalbf7919372009-12-03 12:59:37 -05004138 cfq_mark_cfqq_must_dispatch(cfqq);
Divyesh Shaha11cdaa2010-04-13 19:59:17 +02004139 }
Jens Axboed6ceb252009-04-14 14:18:16 +02004140 }
Jens Axboe5e705372006-07-13 12:39:25 +02004141 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
Jens Axboe22e2c502005-06-27 10:55:12 +02004142 /*
4143 * not the active queue - expire current slice if it is
4144		 * idle and has expired its mean thinktime or this new queue
Divyesh Shah3a9a3f62009-01-30 12:46:41 +01004145 * has some old slice time left and is of higher priority or
4146 * this new queue is RT and the current one is BE
Jens Axboe22e2c502005-06-27 10:55:12 +02004147 */
4148 cfq_preempt_queue(cfqd, cfqq);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02004149 __blk_run_queue(cfqd->queue);
Jens Axboe22e2c502005-06-27 10:55:12 +02004150 }
4151}
4152
Jens Axboe165125e2007-07-24 09:28:11 +02004153static void cfq_insert_request(struct request_queue *q, struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02004154{
Jens Axboeb4878f22005-10-20 16:42:29 +02004155 struct cfq_data *cfqd = q->elevator->elevator_data;
Jens Axboe5e705372006-07-13 12:39:25 +02004156 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02004157
Jens Axboe7b679132008-05-30 12:23:07 +02004158 cfq_log_cfqq(cfqd, cfqq, "insert_request");
Tejun Heoabede6d2012-03-19 15:10:57 -07004159 cfq_init_prio_data(cfqq, RQ_CIC(rq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004161 rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
Jens Axboe22e2c502005-06-27 10:55:12 +02004162 list_add_tail(&rq->queuelist, &cfqq->fifo);
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01004163 cfq_add_rq_rb(rq);
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004164 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
Tejun Heo155fead2012-04-01 14:38:44 -07004165 rq->cmd_flags);
Jens Axboe5e705372006-07-13 12:39:25 +02004166 cfq_rq_enqueued(cfqd, cfqq, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167}
4168
Aaron Carroll45333d52008-08-26 15:52:36 +02004169/*
4170 * Update hw_tag based on peak queue depth over 50 samples under
4171 * sufficient load.
4172 */
4173static void cfq_update_hw_tag(struct cfq_data *cfqd)
4174{
Shaohua Li1a1238a2009-10-27 08:46:23 +01004175 struct cfq_queue *cfqq = cfqd->active_queue;
4176
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004177 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4178 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
Corrado Zoccoloe459dd02009-11-26 10:02:57 +01004179
4180 if (cfqd->hw_tag == 1)
4181 return;
Aaron Carroll45333d52008-08-26 15:52:36 +02004182
4183 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004184 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
Aaron Carroll45333d52008-08-26 15:52:36 +02004185 return;
4186
Shaohua Li1a1238a2009-10-27 08:46:23 +01004187 /*
4188	 * If the active queue doesn't have enough requests and can idle, cfq might not
4189 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
4190 * case
4191 */
4192 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4193 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004194 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
Shaohua Li1a1238a2009-10-27 08:46:23 +01004195 return;
4196
Aaron Carroll45333d52008-08-26 15:52:36 +02004197 if (cfqd->hw_tag_samples++ < 50)
4198 return;
4199
Corrado Zoccoloe459dd02009-11-26 10:02:57 +01004200 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
Aaron Carroll45333d52008-08-26 15:52:36 +02004201 cfqd->hw_tag = 1;
4202 else
4203 cfqd->hw_tag = 0;
Aaron Carroll45333d52008-08-26 15:52:36 +02004204}
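/*
 * Annotation (not in the original source): once 50 samples under
 * sufficient load have been taken, a peak driver depth below
 * CFQ_HW_QUEUE_MIN (5) marks the device as non-queuing (hw_tag = 0),
 * while a deeper peak sets hw_tag = 1; once set to 1 it is never
 * cleared again.
 */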
4205
Vivek Goyal7667aa02009-12-08 17:52:58 -05004206static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4207{
Tejun Heoc5869802011-12-14 00:33:41 +01004208 struct cfq_io_cq *cic = cfqd->active_cic;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004209 u64 now = ktime_get_ns();
Vivek Goyal7667aa02009-12-08 17:52:58 -05004210
Justin TerAvest02a8f012011-02-09 14:20:03 +01004211 /* If the queue already has requests, don't wait */
4212 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4213 return false;
4214
Vivek Goyal7667aa02009-12-08 17:52:58 -05004215 /* If there are other queues in the group, don't wait */
4216 if (cfqq->cfqg->nr_cfqq > 1)
4217 return false;
4218
Shaohua Li7700fc42011-07-12 14:24:56 +02004219 /* the only queue in the group, but think time is big */
4220 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4221 return false;
4222
Vivek Goyal7667aa02009-12-08 17:52:58 -05004223 if (cfq_slice_used(cfqq))
4224 return true;
4225
4226 /* if slice left is less than think time, wait busy */
Shaohua Li383cd722011-07-12 14:24:35 +02004227 if (cic && sample_valid(cic->ttime.ttime_samples)
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004228 && (cfqq->slice_end - now < cic->ttime.ttime_mean))
Vivek Goyal7667aa02009-12-08 17:52:58 -05004229 return true;
4230
4231 /*
4232	 * If the think time is less than a jiffy then ttime_mean=0 and the above
4233	 * will not be true. It might happen that the slice has not expired yet
4234	 * but will expire soon (4-5 ns) during select_queue(). To cover the
4235 * case where think time is less than a jiffy, mark the queue wait
4236 * busy if only 1 jiffy is left in the slice.
4237 */
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004238 if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
Vivek Goyal7667aa02009-12-08 17:52:58 -05004239 return true;
4240
4241 return false;
4242}
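/*
 * Worked example (annotation, not in the original source): with 2ms of
 * slice left and a mean think time of 3ms, slice_end - now < ttime_mean,
 * so this returns true and the completion path extends the slice and
 * marks the queue wait-busy instead of expiring it immediately.
 */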
4243
Jens Axboe165125e2007-07-24 09:28:11 +02004244static void cfq_completed_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245{
Jens Axboe5e705372006-07-13 12:39:25 +02004246 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02004247 struct cfq_data *cfqd = cfqq->cfqd;
Jens Axboe5380a102006-07-13 12:37:56 +02004248 const int sync = rq_is_sync(rq);
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004249 u64 now = ktime_get_ns();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250
Christoph Hellwiga2b80962016-11-01 07:40:09 -06004251 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252
Aaron Carroll45333d52008-08-26 15:52:36 +02004253 cfq_update_hw_tag(cfqd);
4254
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004255 WARN_ON(!cfqd->rq_in_driver);
Jens Axboe6d048f52007-04-25 12:44:27 +02004256 WARN_ON(!cfqq->dispatched);
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004257 cfqd->rq_in_driver--;
Jens Axboe6d048f52007-04-25 12:44:27 +02004258 cfqq->dispatched--;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004259 (RQ_CFQG(rq))->dispatched--;
Tejun Heo155fead2012-04-01 14:38:44 -07004260 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004261 rq_io_start_time_ns(rq), rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004263 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
Jens Axboe3ed9a292007-04-23 08:33:33 +02004264
Vivek Goyal365722b2009-10-03 15:21:27 +02004265 if (sync) {
Vivek Goyal34b98d02012-10-03 16:56:58 -04004266 struct cfq_rb_root *st;
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02004267
Shaohua Li383cd722011-07-12 14:24:35 +02004268 RQ_CIC(rq)->ttime.last_end_request = now;
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02004269
4270 if (cfq_cfqq_on_rr(cfqq))
Vivek Goyal34b98d02012-10-03 16:56:58 -04004271 st = cfqq->service_tree;
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02004272 else
Vivek Goyal34b98d02012-10-03 16:56:58 -04004273 st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4274 cfqq_type(cfqq));
4275
4276 st->ttime.last_end_request = now;
Jan Kara149321a2016-06-28 09:04:01 +02004277 /*
4278 * We have to do this check in jiffies since start_time is in
4279 * jiffies and it is not trivial to convert to ns. If
4280		 * cfq_fifo_expire[1] ever comes close to 1 jiffy, this test
4281 * will become problematic but so far we are fine (the default
4282 * is 128 ms).
4283 */
4284 if (!time_after(rq->start_time +
4285 nsecs_to_jiffies(cfqd->cfq_fifo_expire[1]),
4286 jiffies))
Corrado Zoccolo573412b2009-12-06 11:48:52 +01004287 cfqd->last_delayed_sync = now;
Vivek Goyal365722b2009-10-03 15:21:27 +02004288 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02004289
Shaohua Li7700fc42011-07-12 14:24:56 +02004290#ifdef CONFIG_CFQ_GROUP_IOSCHED
4291 cfqq->cfqg->ttime.last_end_request = now;
4292#endif
4293
Jens Axboecaaa5f92006-06-16 11:23:00 +02004294 /*
4295 * If this is the active queue, check if it needs to be expired,
4296 * or if we want to idle in case it has no pending requests.
4297 */
4298 if (cfqd->active_queue == cfqq) {
Jens Axboea36e71f2009-04-15 12:15:11 +02004299 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4300
Jens Axboe44f7c162007-01-19 11:51:58 +11004301 if (cfq_cfqq_slice_new(cfqq)) {
4302 cfq_set_prio_slice(cfqd, cfqq);
4303 cfq_clear_cfqq_slice_new(cfqq);
4304 }
Vivek Goyalf75edf22009-12-03 12:59:53 -05004305
4306 /*
Vivek Goyal7667aa02009-12-08 17:52:58 -05004307 * Should we wait for next request to come in before we expire
4308 * the queue.
Vivek Goyalf75edf22009-12-03 12:59:53 -05004309 */
Vivek Goyal7667aa02009-12-08 17:52:58 -05004310 if (cfq_should_wait_busy(cfqd, cfqq)) {
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004311 u64 extend_sl = cfqd->cfq_slice_idle;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004312 if (!cfqd->cfq_slice_idle)
4313 extend_sl = cfqd->cfq_group_idle;
Jeff Moyer9a7f38c2016-06-08 08:55:34 -06004314 cfqq->slice_end = now + extend_sl;
Vivek Goyalf75edf22009-12-03 12:59:53 -05004315 cfq_mark_cfqq_wait_busy(cfqq);
Divyesh Shahb1ffe732010-03-25 15:45:03 +01004316 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
Vivek Goyalf75edf22009-12-03 12:59:53 -05004317 }
4318
Jens Axboea36e71f2009-04-15 12:15:11 +02004319 /*
Corrado Zoccolo8e550632009-11-26 10:02:58 +01004320 * Idling is not enabled on:
4321 * - expired queues
4322 * - idle-priority queues
4323 * - async queues
4324 * - queues with still some requests queued
4325 * - when there is a close cooperator
Jens Axboea36e71f2009-04-15 12:15:11 +02004326 */
Jens Axboe08717142008-01-28 11:38:15 +01004327 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
Vivek Goyale5ff0822010-04-26 19:25:11 +02004328 cfq_slice_expired(cfqd, 1);
Corrado Zoccolo8e550632009-11-26 10:02:58 +01004329 else if (sync && cfqq_empty &&
4330 !cfq_close_cooperator(cfqd, cfqq)) {
Corrado Zoccolo749ef9f2010-09-20 15:24:50 +02004331 cfq_arm_slice_timer(cfqd);
Corrado Zoccolo8e550632009-11-26 10:02:58 +01004332 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02004333 }
Jens Axboe6d048f52007-04-25 12:44:27 +02004334
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01004335 if (!cfqd->rq_in_driver)
Jens Axboe23e018a2009-10-05 08:52:35 +02004336 cfq_schedule_dispatch(cfqd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337}
4338
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004339static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
Jens Axboeb8269db2016-06-09 15:47:29 -06004340{
4341 /*
4342 * If REQ_PRIO is set, boost class and prio level, if it's below
4343 * BE/NORM. If prio is not set, restore the potentially boosted
4344 * class/prio level.
4345 */
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004346 if (!(op & REQ_PRIO)) {
Jens Axboeb8269db2016-06-09 15:47:29 -06004347 cfqq->ioprio_class = cfqq->org_ioprio_class;
4348 cfqq->ioprio = cfqq->org_ioprio;
4349 } else {
4350 if (cfq_class_idle(cfqq))
4351 cfqq->ioprio_class = IOPRIO_CLASS_BE;
4352 if (cfqq->ioprio > IOPRIO_NORM)
4353 cfqq->ioprio = IOPRIO_NORM;
4354 }
4355}
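/*
 * Example (annotation, not in the original source): a REQ_PRIO request
 * against an idle-class queue temporarily boosts it to IOPRIO_CLASS_BE at
 * (at worst) IOPRIO_NORM; the next request without REQ_PRIO restores the
 * org_ioprio_class/org_ioprio values saved in cfq_init_prio_data().
 */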
4356
Jens Axboe89850f72006-07-22 16:48:31 +02004357static inline int __cfq_may_queue(struct cfq_queue *cfqq)
Jens Axboe22e2c502005-06-27 10:55:12 +02004358{
Jens Axboe1b379d82009-08-11 08:26:11 +02004359 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
Jens Axboe3b181522005-06-27 10:56:24 +02004360 cfq_mark_cfqq_must_alloc_slice(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02004361 return ELV_MQUEUE_MUST;
Jens Axboe3b181522005-06-27 10:56:24 +02004362 }
Jens Axboe22e2c502005-06-27 10:55:12 +02004363
4364 return ELV_MQUEUE_MAY;
Jens Axboe22e2c502005-06-27 10:55:12 +02004365}
4366
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004367static int cfq_may_queue(struct request_queue *q, unsigned int op)
Jens Axboe22e2c502005-06-27 10:55:12 +02004368{
4369 struct cfq_data *cfqd = q->elevator->elevator_data;
4370 struct task_struct *tsk = current;
Tejun Heoc5869802011-12-14 00:33:41 +01004371 struct cfq_io_cq *cic;
Jens Axboe22e2c502005-06-27 10:55:12 +02004372 struct cfq_queue *cfqq;
4373
4374 /*
4375 * don't force setup of a queue from here, as a call to may_queue
4376 * does not necessarily imply that a request actually will be queued.
4377 * so just lookup a possibly existing queue, or return 'may queue'
4378 * if that fails
4379 */
Jens Axboe4ac845a2008-01-24 08:44:49 +01004380 cic = cfq_cic_lookup(cfqd, tsk->io_context);
Vasily Tarasov91fac312007-04-25 12:29:51 +02004381 if (!cic)
4382 return ELV_MQUEUE_MAY;
4383
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004384 cfqq = cic_to_cfqq(cic, op_is_sync(op));
Jens Axboe22e2c502005-06-27 10:55:12 +02004385 if (cfqq) {
Tejun Heoabede6d2012-03-19 15:10:57 -07004386 cfq_init_prio_data(cfqq, cic);
Christoph Hellwigef295ec2016-10-28 08:48:16 -06004387 cfqq_boost_on_prio(cfqq, op);
Jens Axboe22e2c502005-06-27 10:55:12 +02004388
Jens Axboe89850f72006-07-22 16:48:31 +02004389 return __cfq_may_queue(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02004390 }
4391
4392 return ELV_MQUEUE_MAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393}
4394
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395/*
4396 * queue lock held here
4397 */
Jens Axboebb37b942006-12-01 10:42:33 +01004398static void cfq_put_request(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399{
Jens Axboe5e705372006-07-13 12:39:25 +02004400 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401
Jens Axboe5e705372006-07-13 12:39:25 +02004402 if (cfqq) {
Jens Axboe22e2c502005-06-27 10:55:12 +02004403 const int rw = rq_data_dir(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404
Jens Axboe22e2c502005-06-27 10:55:12 +02004405 BUG_ON(!cfqq->allocated[rw]);
4406 cfqq->allocated[rw]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02004408 /* Put down rq reference on cfqg */
Tejun Heoeb7d8c072012-03-23 14:02:53 +01004409 cfqg_put(RQ_CFQG(rq));
Tejun Heoa612fdd2011-12-14 00:33:41 +01004410 rq->elv.priv[0] = NULL;
4411 rq->elv.priv[1] = NULL;
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02004412
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413 cfq_put_queue(cfqq);
4414 }
4415}
4416
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04004417static struct cfq_queue *
Tejun Heoc5869802011-12-14 00:33:41 +01004418cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04004419 struct cfq_queue *cfqq)
4420{
4421 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4422 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
Jeff Moyerb3b6d042009-10-23 17:14:51 -04004423 cfq_mark_cfqq_coop(cfqq->new_cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04004424 cfq_put_queue(cfqq);
4425 return cic_to_cfqq(cic, 1);
4426}
4427
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004428/*
4429 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4430 * was the last process referring to said cfqq.
4431 */
4432static struct cfq_queue *
Tejun Heoc5869802011-12-14 00:33:41 +01004433split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004434{
4435 if (cfqq_process_refs(cfqq) == 1) {
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004436 cfqq->pid = current->pid;
4437 cfq_clear_cfqq_coop(cfqq);
Shaohua Liae54abe2010-02-05 13:11:45 +01004438 cfq_clear_cfqq_split_coop(cfqq);
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004439 return cfqq;
4440 }
4441
4442 cic_set_cfqq(cic, NULL, 1);
Shaohua Lid02a2c02010-05-25 10:16:53 +02004443
4444 cfq_put_cooperator(cfqq);
4445
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004446 cfq_put_queue(cfqq);
4447 return NULL;
4448}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449/*
Jens Axboe22e2c502005-06-27 10:55:12 +02004450 * Allocate cfq data structures associated with this request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 */
Jens Axboe22e2c502005-06-27 10:55:12 +02004452static int
Tejun Heo852c7882012-03-05 13:15:27 -08004453cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4454 gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455{
4456 struct cfq_data *cfqd = q->elevator->elevator_data;
Tejun Heof1f8cc92011-12-14 00:33:42 +01004457 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 const int rw = rq_data_dir(rq);
Jens Axboea6151c32009-10-07 20:02:57 +02004459 const bool is_sync = rq_is_sync(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02004460 struct cfq_queue *cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461
Tejun Heo216284c32011-12-14 00:33:38 +01004462 spin_lock_irq(q->queue_lock);
Tejun Heof1f8cc92011-12-14 00:33:42 +01004463
Tejun Heo598971b2012-03-19 15:10:58 -07004464 check_ioprio_changed(cic, bio);
Jan Kara142bbdf2017-04-04 14:31:30 +02004465 check_blkcg_changed(cic, bio);
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004466new_queue:
Vasily Tarasov91fac312007-04-25 12:29:51 +02004467 cfqq = cic_to_cfqq(cic, is_sync);
Vivek Goyal32f2e802009-07-09 22:13:16 +02004468 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
Tejun Heobce61332015-08-18 14:54:59 -07004469 if (cfqq)
4470 cfq_put_queue(cfqq);
Tejun Heo2da8de02015-08-18 14:55:02 -07004471 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
Vasily Tarasov91fac312007-04-25 12:29:51 +02004472 cic_set_cfqq(cic, cfqq, is_sync);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04004473 } else {
4474 /*
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004475 * If the queue was seeky for too long, break it apart.
4476 */
Shaohua Liae54abe2010-02-05 13:11:45 +01004477 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
Jeff Moyere6c5bc72009-10-23 17:14:52 -04004478 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4479 cfqq = split_cfqq(cic, cfqq);
4480 if (!cfqq)
4481 goto new_queue;
4482 }
4483
4484 /*
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04004485 * Check to see if this queue is scheduled to merge with
4486 * another, closely cooperating queue. The merging of
4487 * queues happens here as it must be done in process context.
4488 * The reference on new_cfqq was taken in merge_cfqqs.
4489 */
4490 if (cfqq->new_cfqq)
4491 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
Vasily Tarasov91fac312007-04-25 12:29:51 +02004492 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493
4494 cfqq->allocated[rw]++;
Jens Axboe5e705372006-07-13 12:39:25 +02004495
Jens Axboe6fae9c22011-03-01 15:04:39 -05004496 cfqq->ref++;
Tejun Heoeb7d8c072012-03-23 14:02:53 +01004497 cfqg_get(cfqq->cfqg);
Tejun Heoa612fdd2011-12-14 00:33:41 +01004498 rq->elv.priv[0] = cfqq;
Tejun Heo1adaf3d2012-03-05 13:15:15 -08004499 rq->elv.priv[1] = cfqq->cfqg;
Tejun Heo216284c32011-12-14 00:33:38 +01004500 spin_unlock_irq(q->queue_lock);
Jens Axboe5d7f5ce2017-02-16 07:57:33 -07004501
Jens Axboe5e705372006-07-13 12:39:25 +02004502 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503}
4504
David Howells65f27f32006-11-22 14:55:48 +00004505static void cfq_kick_queue(struct work_struct *work)
Jens Axboe22e2c502005-06-27 10:55:12 +02004506{
David Howells65f27f32006-11-22 14:55:48 +00004507 struct cfq_data *cfqd =
Jens Axboe23e018a2009-10-05 08:52:35 +02004508 container_of(work, struct cfq_data, unplug_work);
Jens Axboe165125e2007-07-24 09:28:11 +02004509 struct request_queue *q = cfqd->queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02004510
Jens Axboe40bb54d2009-04-15 12:11:10 +02004511 spin_lock_irq(q->queue_lock);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02004512 __blk_run_queue(cfqd->queue);
Jens Axboe40bb54d2009-04-15 12:11:10 +02004513 spin_unlock_irq(q->queue_lock);
Jens Axboe22e2c502005-06-27 10:55:12 +02004514}

/*
 * Timer that fires if the active_queue is still idling inside its time slice
 */
static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
{
	struct cfq_data *cfqd = container_of(timer, struct cfq_data,
					     idle_slice_timer);
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * The slice has expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * Only expire and reinvoke the request handler if there
		 * are other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * Not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * The queue-depth flag is only reset when idling didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
	return HRTIMER_NORESTART;
}
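
/*
 * For context: this timer is armed by cfq_arm_slice_timer() earlier in
 * this file when the active queue has no request pending but is expected
 * to issue one shortly. A sketch of the arming step, assuming an idle
 * window 'sl' derived from cfq_slice_idle or cfq_group_idle:
 *
 *	hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
 *		      HRTIMER_MODE_REL);
 *
 * If a request arrives inside the window the timer is cancelled;
 * otherwise the handler above expires the slice and kicks dispatch.
 */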

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	hrtimer_cancel(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	spin_unlock_irq(q->queue_lock);

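	/*
	 * Shut the timer and work down a second time: either could
	 * plausibly have been re-armed (e.g. via a request completion)
	 * between the first call and dropping the queue lock above.
	 */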
	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct cfq_data *cfqd;
	struct blkcg_gq *blkg __maybe_unused;
	int i, ret;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
	if (!cfqd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = cfqd;

	cfqd->queue = q;
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
	if (ret)
		goto out_free;

	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (!cfqd->root_group)
		goto out_free;

	cfq_init_cfqg_base(cfqd->root_group);
	cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
#endif
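	/*
	 * In the !CFQ_GROUP_IOSCHED branch above, doubling
	 * CFQ_WEIGHT_LEGACY_DFL gives the root group twice the weight a
	 * child group would get by default; that is the "prefer root
	 * group over other groups" promised in the comment above.
	 */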

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * We optimistically start by assuming sync ops weren't delayed in
	 * the last second, in order to have a larger depth for async
	 * operations.
	 */
	cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
	return 0;

out_free:
	kfree(cfqd);
	kobject_put(&eq->kobj);
	return ret;
}

static void cfq_registered_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	struct cfq_data *cfqd = e->elevator_data;

	/*
	 * Default to IOPS mode with no idling for SSDs
	 */
	if (blk_queue_nonrot(q))
		cfqd->cfq_slice_idle = 0;
	wbt_disable_default(q);
}
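
/*
 * The slice_idle = 0 default for non-rotational devices can still be
 * overridden through the sysfs attributes defined below; a hypothetical
 * shell example (device name assumed):
 *
 *	echo cfq > /sys/block/sda/queue/scheduler
 *	echo 8 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Values written this way are taken in milliseconds and converted to
 * nanoseconds by the STORE_FUNCTION-generated handlers below.
 */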

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%u\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	if (__CONV)							\
		__data = div_u64(__data, NSEC_PER_MSEC);		\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
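
/*
 * For clarity, one expansion: SHOW_FUNCTION(cfq_slice_idle_show,
 * cfqd->cfq_slice_idle, 1) generates roughly
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e,
 *					   char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		u64 __data = cfqd->cfq_slice_idle;
 *
 *		__data = div_u64(__data, NSEC_PER_MSEC);
 *		return cfq_var_show(__data, page);
 *	}
 *
 * i.e. tunables kept internally in nanoseconds are reported in
 * milliseconds whenever __CONV is 1.
 */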

#define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	u64 __data = __VAR;						\
	__data = div_u64(__data, NSEC_PER_USEC);			\
	return cfq_var_show(__data, (page));				\
}
USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
#undef USEC_SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
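
/*
 * Worked example of the store path: writing "8" to slice_idle parses the
 * value with cfq_var_store(), clamps it to [0, UINT_MAX] and, since
 * __CONV is 1 for that attribute, stores
 *
 *	8 * NSEC_PER_MSEC = 8,000,000 ns
 *
 * in cfqd->cfq_slice_idle. The _us variants below apply a fixed
 * NSEC_PER_USEC factor instead, allowing sub-millisecond settings.
 */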

#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
	return ret;							\
}
USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
#undef USEC_STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_sync_us),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_us),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(slice_idle_us),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(group_idle_us),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	CFQ_ATTR(target_latency_us),
	__ATTR_NULL
};
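
/*
 * Each CFQ_ATTR(name) expands to a world-readable, root-writable
 * attribute bound to the cfq_<name>_show/cfq_<name>_store pair generated
 * above; the elevator core exposes this table under
 * /sys/block/<dev>/queue/iosched/ while cfq is the active scheduler.
 */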

static struct elevator_type iosched_cfq = {
	.ops.sq = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_bio_merge_fn = cfq_allow_bio_merge,
		.elevator_allow_rq_merge_fn = cfq_allow_rq_merge,
		.elevator_bio_merged_fn = cfq_bio_merged,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_init_icq_fn = cfq_init_icq,
		.elevator_exit_icq_fn = cfq_exit_icq,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
		.elevator_registered_fn = cfq_registered_queue,
	},
	.icq_size = sizeof(struct cfq_io_cq),
	.icq_align = __alignof__(struct cfq_io_cq),
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};
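
/*
 * A note on .ops.sq: elevator_type in this kernel generation carries a
 * union of legacy single-queue ops (.sq, used here) and blk-mq ops
 * (.mq); cfq implements only the single-queue side, so it is not
 * selectable on blk-mq devices.
 */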

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.dfl_cftypes = cfq_blkcg_files,
	.legacy_cftypes = cfq_blkcg_legacy_files,

	.cpd_alloc_fn = cfq_cpd_alloc,
	.cpd_init_fn = cfq_cpd_init,
	.cpd_free_fn = cfq_cpd_free,
	.cpd_bind_fn = cfq_cpd_bind,

	.pd_alloc_fn = cfq_pd_alloc,
	.pd_init_fn = cfq_pd_init,
	.pd_offline_fn = cfq_pd_offline,
	.pd_free_fn = cfq_pd_free,
	.pd_reset_stats_fn = cfq_pd_reset_stats,
};
#endif
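
/*
 * In the policy above, the cpd_* callbacks manage per-cgroup policy data
 * while the pd_* callbacks manage the per-(cgroup, request_queue) data,
 * i.e. one cfq_group per blkcg_gq.
 */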

static int __init cfq_init(void)
{
	int ret;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");