/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
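/*
 * seek_history is a 32-bit sliding window with one bit per recent request;
 * a set bit records a request that landed far (more than CFQQ_SEEK_THR
 * sectors) from the previous one.  hweight32() counts those bits, so with
 * the 32/8 threshold a queue is classed seeky once more than 4 of its last
 * 32 requests were that far apart.
 */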

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

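/*
 * Think time bookkeeping: ttime_mean is a running average of the gap
 * between one request completing (last_end_request) and the next one
 * arriving.  cfq_io_thinktime_big() below compares it against the idle
 * slice to decide whether idling for this context is worth the wait.
 */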
struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately and has its own single service tree
 * (service_tree_idle), so it is never used to index service_trees.
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total sectors transferred */
	struct blkg_stat sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg. This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active. An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to. This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1. The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
	 * this cfqg against the child cfqgs. For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */
	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
};

struct cfq_io_cq {
	struct io_cq icq;	/* must be the first member */
	struct cfq_queue *cfqq[2];
	struct cfq_ttime ttime;
	int ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t blkcg_id;	/* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}
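/*
 * E.g. st_for(cfqg, RT_WORKLOAD, SYNC_WORKLOAD) yields the tree holding
 * the group's sync RT queues; any IDLE-class lookup falls through to the
 * single service_tree_idle regardless of workload type.
 */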

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
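/*
 * Each CFQ_CFQQ_FNS(name) invocation above expands to a mark/clear/test
 * triplet, e.g. CFQ_CFQQ_FNS(on_rr) generates cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), used elsewhere as
 * "if (cfq_cfqq_on_rr(cfqq)) ...".
 */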

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)
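/*
 * Illustrative (hypothetical) trace line: "cfq2046SN /mygrp ..." decodes
 * as pid 2046, 'S'ync queue, 'N'o-idle workload, cgroup path /mygrp.
 */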

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be merged */
	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
	blkg_rwstat_merge(&to->serviced, &from->serviced);
	blkg_rwstat_merge(&to->merged, &from->merged);
	blkg_rwstat_merge(&to->service_time, &from->service_time);
	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
	blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_merge(&to->avg_queue_size_samples,
			&from->avg_queue_size_samples);
	blkg_stat_merge(&to->dequeue, &from->dequeue);
	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_merge(&to->idle_time, &from->idle_time);
	blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j] : NULL) \
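/*
 * The macro above visits all seven trees of a group: the 2x3
 * service_trees[BE|RT][ASYNC|SYNC_NOIDLE|SYNC] plus the single
 * service_tree_idle (i == IDLE_WORKLOAD, j == 0).
 */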

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
					struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and the drive supports NCQ,
	 * requests execute in parallel and measuring time per queue is
	 * not meaningful unless we force shallow queue depths, which in
	 * turn becomes a performance bottleneck. In such cases switch to
	 * providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
	       cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
	       cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
	       cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
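/*
 * With the default sync slice of HZ/10 (100ms) and CFQ_SLICE_SCALE = 5,
 * the step is base_slice/5 = 20ms per priority level: prio 4 (the default)
 * keeps 100ms, prio 0 gets 180ms and prio 7 gets 40ms.
 */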

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1]. The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	do_div(c, vfraction);
	return c;
}
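/*
 * Worked example with CFQ_SERVICE_SHIFT = 12: a group entitled to the
 * whole device (vfraction == 1 << 12) is charged exactly @charge in fixed
 * point, while a group entitled to half (vfraction == 1 << 11) is charged
 * double, so lightly weighted groups see their vdisktime advance faster.
 */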

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}
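/*
 * The leftmost group carries the smallest vdisktime, so min_vdisktime only
 * ever ratchets forward here (max_vdisktime() never moves it back); it
 * provides a monotonic baseline for positioning newly activated groups.
 */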

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated with a formula that gives more weight to higher numbers,
 * to quickly follow sudden increases and decrease slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
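/*
 * With cfq_hist_divisor = 4 the update is avg' = (3*max + min + 2) / 4:
 * e.g. avg 1 with a burst of 5 busy queues jumps to (15 + 1 + 2) / 4 = 4
 * in one step, while avg 5 dropping to 1 busy queue only falls to 4;
 * rises are followed quickly, falls decay slowly.
 */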

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
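/*
 * Example: with a 100ms sync slice and iq = 4 queues, expect_latency is
 * 400ms; if the group's share of the target latency is only 300ms, each
 * slice is scaled by 300/400 (subject to the low_slice floor) so all
 * queues still fit within the group slice.
 */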

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
Jens Axboe5e705372006-07-13 12:39:25 +02001076static struct request *
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001077cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078{
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001079 sector_t s1, s2, d1 = 0, d2 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 unsigned long back_max;
Andreas Mohre8a99052006-03-28 08:59:49 +02001081#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
1082#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
1083 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084
Jens Axboe5e705372006-07-13 12:39:25 +02001085 if (rq1 == NULL || rq1 == rq2)
1086 return rq2;
1087 if (rq2 == NULL)
1088 return rq1;
Jens Axboe9c2c38a2005-08-24 14:57:54 +02001089
Namhyung Kim229836b2011-05-24 10:23:21 +02001090 if (rq_is_sync(rq1) != rq_is_sync(rq2))
1091 return rq_is_sync(rq1) ? rq1 : rq2;
1092
Christoph Hellwig65299a32011-08-23 14:50:29 +02001093 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1094 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
Jens Axboeb53d1ed2011-08-19 08:34:48 +02001095
Tejun Heo83096eb2009-05-07 22:24:39 +09001096 s1 = blk_rq_pos(rq1);
1097 s2 = blk_rq_pos(rq2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 /*
1100 * by definition, 1KiB is 2 sectors
1101 */
1102 back_max = cfqd->cfq_back_max * 2;
1103
1104 /*
1105 * Strict one way elevator _except_ in the case where we allow
1106 * short backward seeks which are biased as twice the cost of a
1107 * similar forward seek.
1108 */
1109 if (s1 >= last)
1110 d1 = s1 - last;
1111 else if (s1 + back_max >= last)
1112 d1 = (last - s1) * cfqd->cfq_back_penalty;
1113 else
Andreas Mohre8a99052006-03-28 08:59:49 +02001114 wrap |= CFQ_RQ1_WRAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
1116 if (s2 >= last)
1117 d2 = s2 - last;
1118 else if (s2 + back_max >= last)
1119 d2 = (last - s2) * cfqd->cfq_back_penalty;
1120 else
Andreas Mohre8a99052006-03-28 08:59:49 +02001121 wrap |= CFQ_RQ2_WRAP;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122
1123 /* Found required data */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124
Andreas Mohre8a99052006-03-28 08:59:49 +02001125 /*
1126 * By doing switch() on the bit mask "wrap" we avoid having to
1127 * check two variables for all permutations: --> faster!
1128 */
1129 switch (wrap) {
Jens Axboe5e705372006-07-13 12:39:25 +02001130 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
Andreas Mohre8a99052006-03-28 08:59:49 +02001131 if (d1 < d2)
Jens Axboe5e705372006-07-13 12:39:25 +02001132 return rq1;
Andreas Mohre8a99052006-03-28 08:59:49 +02001133 else if (d2 < d1)
Jens Axboe5e705372006-07-13 12:39:25 +02001134 return rq2;
Andreas Mohre8a99052006-03-28 08:59:49 +02001135 else {
1136 if (s1 >= s2)
Jens Axboe5e705372006-07-13 12:39:25 +02001137 return rq1;
Andreas Mohre8a99052006-03-28 08:59:49 +02001138 else
Jens Axboe5e705372006-07-13 12:39:25 +02001139 return rq2;
Andreas Mohre8a99052006-03-28 08:59:49 +02001140 }
1141
1142 case CFQ_RQ2_WRAP:
Jens Axboe5e705372006-07-13 12:39:25 +02001143 return rq1;
Andreas Mohre8a99052006-03-28 08:59:49 +02001144 case CFQ_RQ1_WRAP:
Jens Axboe5e705372006-07-13 12:39:25 +02001145 return rq2;
1146 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
Andreas Mohre8a99052006-03-28 08:59:49 +02001147 default:
1148 /*
1149 * Since both rqs are wrapped,
1150	 * start with the one that's further behind the head
1151	 * (--> only *one* back seek required),
1152	 * since a back seek takes more time than a forward one.
1153 */
1154 if (s1 <= s2)
Jens Axboe5e705372006-07-13 12:39:25 +02001155 return rq1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 else
Jens Axboe5e705372006-07-13 12:39:25 +02001157 return rq2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 }
1159}
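
/*
 * Illustrative sketch only (not used by CFQ itself): the cost metric
 * above, reduced to a single request. A position at or ahead of the
 * head costs its forward distance; within back_max behind the head it
 * costs the backward distance scaled by the seek penalty; anything
 * further back "wraps" and loses to any non-wrapped candidate.
 */
static sector_t __maybe_unused
cfq_example_seek_cost(sector_t pos, sector_t last, sector_t back_max,
		      unsigned int penalty, bool *wrapped)
{
	*wrapped = false;

	if (pos >= last)
		return pos - last;		/* forward seek */
	if (pos + back_max >= last)
		return (last - pos) * penalty;	/* short backward seek */

	*wrapped = true;			/* too far behind: wraps */
	return 0;
}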
1160
Jens Axboe498d3aa22007-04-26 12:54:48 +02001161/*
1162 * Below are the helpers for the leftmost-cached rbtree.
1163 */
Jens Axboe08717142008-01-28 11:38:15 +01001164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
Jens Axboecc09e292007-04-26 12:53:50 +02001165{
Vivek Goyal615f0252009-12-03 12:59:39 -05001166 /* Service tree is empty */
1167 if (!root->count)
1168 return NULL;
1169
Jens Axboecc09e292007-04-26 12:53:50 +02001170 if (!root->left)
1171 root->left = rb_first(&root->rb);
1172
Jens Axboe08717142008-01-28 11:38:15 +01001173 if (root->left)
1174 return rb_entry(root->left, struct cfq_queue, rb_node);
1175
1176 return NULL;
Jens Axboecc09e292007-04-26 12:53:50 +02001177}
1178
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1180{
1181 if (!root->left)
1182 root->left = rb_first(&root->rb);
1183
1184 if (root->left)
1185 return rb_entry_cfqg(root->left);
1186
1187 return NULL;
1188}
1189
Jens Axboea36e71f2009-04-15 12:15:11 +02001190static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1191{
1192 rb_erase(n, root);
1193 RB_CLEAR_NODE(n);
1194}
1195
Jens Axboecc09e292007-04-26 12:53:50 +02001196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1197{
1198 if (root->left == n)
1199 root->left = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02001200 rb_erase_init(n, &root->rb);
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01001201 --root->count;
Jens Axboecc09e292007-04-26 12:53:50 +02001202}
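
/*
 * Illustrative sketch, not part of CFQ proper: how the cached leftmost
 * pointer is meant to be used. cfq_rb_first() caches the leftmost node
 * in ->left so repeated peeks avoid the O(log n) rb_first() walk, and
 * cfq_rb_erase() invalidates the cache only when the leftmost node
 * itself is removed.
 */
static struct cfq_queue * __maybe_unused
cfq_rb_pop_example(struct cfq_rb_root *root)
{
	struct cfq_queue *cfqq = cfq_rb_first(root);

	if (cfqq)
		cfq_rb_erase(&cfqq->rb_node, root);

	return cfqq;
}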
1203
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204/*
1205 * would be nice to take fifo expire time into account as well
1206 */
Jens Axboe5e705372006-07-13 12:39:25 +02001207static struct request *
1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1209 struct request *last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210{
Jens Axboe21183b02006-07-13 12:33:14 +02001211 struct rb_node *rbnext = rb_next(&last->rb_node);
1212 struct rb_node *rbprev = rb_prev(&last->rb_node);
Jens Axboe5e705372006-07-13 12:39:25 +02001213 struct request *next = NULL, *prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
Jens Axboe21183b02006-07-13 12:33:14 +02001215 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
1217 if (rbprev)
Jens Axboe5e705372006-07-13 12:39:25 +02001218 prev = rb_entry_rq(rbprev);
Jens Axboe21183b02006-07-13 12:33:14 +02001219
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 if (rbnext)
Jens Axboe5e705372006-07-13 12:39:25 +02001221 next = rb_entry_rq(rbnext);
Jens Axboe21183b02006-07-13 12:33:14 +02001222 else {
1223 rbnext = rb_first(&cfqq->sort_list);
1224 if (rbnext && rbnext != &last->rb_node)
Jens Axboe5e705372006-07-13 12:39:25 +02001225 next = rb_entry_rq(rbnext);
Jens Axboe21183b02006-07-13 12:33:14 +02001226 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001228 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229}
1230
Jens Axboed9e76202007-04-20 14:27:50 +02001231static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1232 struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
Jens Axboed9e76202007-04-20 14:27:50 +02001234 /*
1235 * just an approximation, should be ok.
1236 */
Vivek Goyalcdb16e82009-12-03 12:59:38 -05001237 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
Jens Axboe464191c2009-11-30 09:38:13 +01001238 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
Jens Axboed9e76202007-04-20 14:27:50 +02001239}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001241static inline s64
1242cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1243{
1244 return cfqg->vdisktime - st->min_vdisktime;
1245}
1246
1247static void
1248__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1249{
1250 struct rb_node **node = &st->rb.rb_node;
1251 struct rb_node *parent = NULL;
1252 struct cfq_group *__cfqg;
1253 s64 key = cfqg_key(st, cfqg);
1254 int left = 1;
1255
1256 while (*node != NULL) {
1257 parent = *node;
1258 __cfqg = rb_entry_cfqg(parent);
1259
1260 if (key < cfqg_key(st, __cfqg))
1261 node = &parent->rb_left;
1262 else {
1263 node = &parent->rb_right;
1264 left = 0;
1265 }
1266 }
1267
1268 if (left)
1269 st->left = &cfqg->rb_node;
1270
1271 rb_link_node(&cfqg->rb_node, parent, node);
1272 rb_insert_color(&cfqg->rb_node, &st->rb);
1273}
1274
1275static void
Justin TerAvest8184f932011-03-17 16:12:36 +01001276cfq_update_group_weight(struct cfq_group *cfqg)
1277{
1278 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
Tejun Heoe71357e2013-01-09 08:05:10 -08001279
Tejun Heo3381cb82012-04-01 14:38:44 -07001280 if (cfqg->new_weight) {
Justin TerAvest8184f932011-03-17 16:12:36 +01001281 cfqg->weight = cfqg->new_weight;
Tejun Heo3381cb82012-04-01 14:38:44 -07001282 cfqg->new_weight = 0;
Justin TerAvest8184f932011-03-17 16:12:36 +01001283 }
Tejun Heoe71357e2013-01-09 08:05:10 -08001284
1285 if (cfqg->new_leaf_weight) {
1286 cfqg->leaf_weight = cfqg->new_leaf_weight;
1287 cfqg->new_leaf_weight = 0;
1288 }
Justin TerAvest8184f932011-03-17 16:12:36 +01001289}
1290
1291static void
1292cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1293{
Tejun Heo1d3650f2013-01-09 08:05:11 -08001294 unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
Tejun Heo7918ffb2013-01-09 08:05:11 -08001295 struct cfq_group *pos = cfqg;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001296 struct cfq_group *parent;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001297 bool propagate;
1298
1299 /* add to the service tree */
Justin TerAvest8184f932011-03-17 16:12:36 +01001300 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1301
1302 cfq_update_group_weight(cfqg);
1303 __cfq_group_service_tree_add(st, cfqg);
Tejun Heo7918ffb2013-01-09 08:05:11 -08001304
1305 /*
Tejun Heo1d3650f2013-01-09 08:05:11 -08001306 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1307 * entitled to. vfraction is calculated by walking the tree
1308 * towards the root calculating the fraction it has at each level.
1309 * The compounded ratio is how much vfraction @cfqg owns.
1310 *
1311	 * Start with the proportion the tasks in this cfqg have against the
1312	 * active children cfqgs - its leaf_weight against children_weight.
Tejun Heo7918ffb2013-01-09 08:05:11 -08001313 */
1314 propagate = !pos->nr_active++;
1315 pos->children_weight += pos->leaf_weight;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001316 vfr = vfr * pos->leaf_weight / pos->children_weight;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001317
Tejun Heo1d3650f2013-01-09 08:05:11 -08001318 /*
1319 * Compound ->weight walking up the tree. Both activation and
1320 * vfraction calculation are done in the same loop. Propagation
1321 * stops once an already activated node is met. vfraction
1322 * calculation should always continue to the root.
1323 */
Tejun Heod02f7aa2013-01-09 08:05:11 -08001324 while ((parent = cfqg_parent(pos))) {
Tejun Heo1d3650f2013-01-09 08:05:11 -08001325 if (propagate) {
1326 propagate = !parent->nr_active++;
1327 parent->children_weight += pos->weight;
1328 }
1329 vfr = vfr * pos->weight / parent->children_weight;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001330 pos = parent;
1331 }
Tejun Heo1d3650f2013-01-09 08:05:11 -08001332
1333 cfqg->vfraction = max_t(unsigned, vfr, 1);
Justin TerAvest8184f932011-03-17 16:12:36 +01001334}
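
/*
 * Worked example of the vfraction compounding above (illustration
 * only, with made-up weights): at CFQ_SERVICE_SHIFT fixed point, a
 * cfqg holding leaf_weight 500 of 1000 children_weight at its level,
 * under a parent holding weight 300 of 600 at the next level, gets
 * vfraction = 1 * 500/1000 * 300/600 = 1/4 of the device.
 */
static unsigned int __maybe_unused
cfq_example_compound_vfraction(const unsigned int *weight,
			       const unsigned int *children_weight,
			       int nr_levels)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* the whole device */
	int i;

	for (i = 0; i < nr_levels; i++)
		vfr = vfr * weight[i] / children_weight[i];

	return max_t(unsigned int, vfr, 1);
}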
1335
1336static void
1337cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001338{
1339 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1340 struct cfq_group *__cfqg;
1341 struct rb_node *n;
1342
1343 cfqg->nr_cfqq++;
Gui Jianfeng760701b2010-11-30 20:52:47 +01001344 if (!RB_EMPTY_NODE(&cfqg->rb_node))
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001345 return;
1346
1347 /*
1348 * Currently put the group at the end. Later implement something
1349	 * so that groups get a lesser vtime based on their weights, so that
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001350	 * a group does not lose everything if it was not continuously backlogged.
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001351 */
1352 n = rb_last(&st->rb);
1353 if (n) {
1354 __cfqg = rb_entry_cfqg(n);
1355 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1356 } else
1357 cfqg->vdisktime = st->min_vdisktime;
Justin TerAvest8184f932011-03-17 16:12:36 +01001358 cfq_group_service_tree_add(st, cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001359}
1360
1361static void
Justin TerAvest8184f932011-03-17 16:12:36 +01001362cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1363{
Tejun Heo7918ffb2013-01-09 08:05:11 -08001364 struct cfq_group *pos = cfqg;
1365 bool propagate;
1366
1367 /*
1368 * Undo activation from cfq_group_service_tree_add(). Deactivate
1369 * @cfqg and propagate deactivation upwards.
1370 */
1371 propagate = !--pos->nr_active;
1372 pos->children_weight -= pos->leaf_weight;
1373
1374 while (propagate) {
Tejun Heod02f7aa2013-01-09 08:05:11 -08001375 struct cfq_group *parent = cfqg_parent(pos);
Tejun Heo7918ffb2013-01-09 08:05:11 -08001376
1377 /* @pos has 0 nr_active at this point */
1378 WARN_ON_ONCE(pos->children_weight);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001379 pos->vfraction = 0;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001380
1381 if (!parent)
1382 break;
1383
1384 propagate = !--parent->nr_active;
1385 parent->children_weight -= pos->weight;
1386 pos = parent;
1387 }
1388
1389 /* remove from the service tree */
Justin TerAvest8184f932011-03-17 16:12:36 +01001390 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1391 cfq_rb_erase(&cfqg->rb_node, st);
1392}
1393
1394static void
1395cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001396{
1397 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1398
1399 BUG_ON(cfqg->nr_cfqq < 1);
1400 cfqg->nr_cfqq--;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05001401
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001402 /* If there are other cfq queues under this group, don't delete it */
1403 if (cfqg->nr_cfqq)
1404 return;
1405
Vivek Goyal2868ef72009-12-03 12:59:48 -05001406 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
Justin TerAvest8184f932011-03-17 16:12:36 +01001407 cfq_group_service_tree_del(st, cfqg);
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001408 cfqg->saved_wl_slice = 0;
Tejun Heo155fead2012-04-01 14:38:44 -07001409 cfqg_stats_update_dequeue(cfqg);
Vivek Goyaldae739e2009-12-03 12:59:45 -05001410}
1411
Justin TerAvest167400d2011-03-12 16:54:00 +01001412static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1413 unsigned int *unaccounted_time)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001414{
Vivek Goyalf75edf22009-12-03 12:59:53 -05001415 unsigned int slice_used;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001416
1417 /*
1418	 * The queue expired before even a single request completed, or
1419	 * it expired immediately after the first request completed.
1420 */
1421 if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1422 /*
1423 * Also charge the seek time incurred to the group, otherwise
1424		 * if there are multiple queues in the group, each can dispatch
1425		 * a single request on seeky media and cause lots of seek time
1426		 * and the group will never account for it.
1427 */
1428 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1429 1);
1430 } else {
1431 slice_used = jiffies - cfqq->slice_start;
Justin TerAvest167400d2011-03-12 16:54:00 +01001432 if (slice_used > cfqq->allocated_slice) {
1433 *unaccounted_time = slice_used - cfqq->allocated_slice;
Vivek Goyalf75edf22009-12-03 12:59:53 -05001434 slice_used = cfqq->allocated_slice;
Justin TerAvest167400d2011-03-12 16:54:00 +01001435 }
1436 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1437 *unaccounted_time += cfqq->slice_start -
1438 cfqq->dispatch_start;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001439 }
1440
Vivek Goyaldae739e2009-12-03 12:59:45 -05001441 return slice_used;
1442}
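
/*
 * Worked example for the accounting above (made-up jiffies values):
 * a queue dispatched at t=100 whose slice actually started at t=110
 * with allocated_slice=40 and is expired at t=160 ran for 50 jiffies;
 * 10 of those exceed the allocated slice and another 10 were spent
 * waiting for the slice to start, so slice_used=40, unaccounted=20.
 */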
1443
1444static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
Vivek Goyale5ff0822010-04-26 19:25:11 +02001445 struct cfq_queue *cfqq)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001446{
1447 struct cfq_rb_root *st = &cfqd->grp_service_tree;
Justin TerAvest167400d2011-03-12 16:54:00 +01001448 unsigned int used_sl, charge, unaccounted_sl = 0;
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001449 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1450 - cfqg->service_tree_idle.count;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001451 unsigned int vfr;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001452
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001453 BUG_ON(nr_sync < 0);
Justin TerAvest167400d2011-03-12 16:54:00 +01001454 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001455
Vivek Goyal02b35082010-08-23 12:23:53 +02001456 if (iops_mode(cfqd))
1457 charge = cfqq->slice_dispatch;
1458 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1459 charge = cfqq->allocated_slice;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001460
Tejun Heo1d3650f2013-01-09 08:05:11 -08001461 /*
1462 * Can't update vdisktime while on service tree and cfqg->vfraction
1463 * is valid only while on it. Cache vfr, leave the service tree,
1464 * update vdisktime and go back on. The re-addition to the tree
1465 * will also update the weights as necessary.
1466 */
1467 vfr = cfqg->vfraction;
Justin TerAvest8184f932011-03-17 16:12:36 +01001468 cfq_group_service_tree_del(st, cfqg);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001469 cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
Justin TerAvest8184f932011-03-17 16:12:36 +01001470 cfq_group_service_tree_add(st, cfqg);
Vivek Goyaldae739e2009-12-03 12:59:45 -05001471
1472 /* This group is being expired. Save the context */
1473 if (time_after(cfqd->workload_expires, jiffies)) {
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001474 cfqg->saved_wl_slice = cfqd->workload_expires
Vivek Goyaldae739e2009-12-03 12:59:45 -05001475 - jiffies;
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001476 cfqg->saved_wl_type = cfqd->serving_wl_type;
1477 cfqg->saved_wl_class = cfqd->serving_wl_class;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001478 } else
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001479 cfqg->saved_wl_slice = 0;
Vivek Goyal2868ef72009-12-03 12:59:48 -05001480
1481 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1482 st->min_vdisktime);
Joe Perchesfd16d262011-06-13 10:42:49 +02001483 cfq_log_cfqq(cfqq->cfqd, cfqq,
1484 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1485 used_sl, cfqq->slice_dispatch, charge,
1486 iops_mode(cfqd), cfqq->nr_sectors);
Tejun Heo155fead2012-04-01 14:38:44 -07001487 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1488 cfqg_stats_set_start_empty_time(cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001489}
1490
Tejun Heof51b8022012-03-05 13:15:05 -08001491/**
1492 * cfq_init_cfqg_base - initialize base part of a cfq_group
1493 * @cfqg: cfq_group to initialize
1494 *
1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1496 * is enabled or not.
1497 */
1498static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1499{
1500 struct cfq_rb_root *st;
1501 int i, j;
1502
1503 for_each_cfqg_st(cfqg, i, j, st)
1504 *st = CFQ_RB_ROOT;
1505 RB_CLEAR_NODE(&cfqg->rb_node);
1506
1507 cfqg->ttime.last_end_request = jiffies;
1508}
1509
Vivek Goyal25fb5162009-12-03 12:59:46 -05001510#ifdef CONFIG_CFQ_GROUP_IOSCHED
Peter Zijlstra90d38392013-11-12 19:42:14 -08001511static void cfqg_stats_init(struct cfqg_stats *stats)
1512{
1513 blkg_rwstat_init(&stats->service_bytes);
1514 blkg_rwstat_init(&stats->serviced);
1515 blkg_rwstat_init(&stats->merged);
1516 blkg_rwstat_init(&stats->service_time);
1517 blkg_rwstat_init(&stats->wait_time);
1518 blkg_rwstat_init(&stats->queued);
1519
1520 blkg_stat_init(&stats->sectors);
1521 blkg_stat_init(&stats->time);
1522
1523#ifdef CONFIG_DEBUG_BLK_CGROUP
1524 blkg_stat_init(&stats->unaccounted_time);
1525 blkg_stat_init(&stats->avg_queue_size_sum);
1526 blkg_stat_init(&stats->avg_queue_size_samples);
1527 blkg_stat_init(&stats->dequeue);
1528 blkg_stat_init(&stats->group_wait_time);
1529 blkg_stat_init(&stats->idle_time);
1530 blkg_stat_init(&stats->empty_time);
1531#endif
1532}
1533
Tejun Heo3c798392012-04-16 13:57:25 -07001534static void cfq_pd_init(struct blkcg_gq *blkg)
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001535{
Tejun Heo03814112012-03-05 13:15:14 -08001536 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
Vivek Goyal25fb5162009-12-03 12:59:46 -05001537
Tejun Heof51b8022012-03-05 13:15:05 -08001538 cfq_init_cfqg_base(cfqg);
Tejun Heo3381cb82012-04-01 14:38:44 -07001539 cfqg->weight = blkg->blkcg->cfq_weight;
Tejun Heoe71357e2013-01-09 08:05:10 -08001540 cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
Peter Zijlstra90d38392013-11-12 19:42:14 -08001541 cfqg_stats_init(&cfqg->stats);
1542 cfqg_stats_init(&cfqg->dead_stats);
Vivek Goyal25fb5162009-12-03 12:59:46 -05001543}
1544
Tejun Heo0b399202013-01-09 08:05:13 -08001545static void cfq_pd_offline(struct blkcg_gq *blkg)
1546{
1547 /*
1548 * @blkg is going offline and will be ignored by
1549 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
1550 * that they don't get lost. If IOs complete after this point, the
1551 * stats for them will be lost. Oh well...
1552 */
1553 cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1554}
1555
Tejun Heo43114012013-01-09 08:05:13 -08001556/* offset delta from cfqg->stats to cfqg->dead_stats */
1557static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1558 offsetof(struct cfq_group, stats);
1559
1560/* to be used by recursive prfill, sums live and dead stats recursively */
1561static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1562{
1563 u64 sum = 0;
1564
1565 sum += blkg_stat_recursive_sum(pd, off);
1566 sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1567 return sum;
1568}
1569
1570/* to be used by recursive prfill, sums live and dead rwstats recursively */
1571static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1572 int off)
1573{
1574 struct blkg_rwstat a, b;
1575
1576 a = blkg_rwstat_recursive_sum(pd, off);
1577 b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1578 blkg_rwstat_merge(&a, &b);
1579 return a;
1580}
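
/*
 * Concretely: offsetof(struct cfq_group, stats.time) plus
 * dead_stats_off_delta equals offsetof(struct cfq_group,
 * dead_stats.time), so the two helpers above can sum the live and the
 * dead copy of any stat from a single base offset.
 */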
1581
Tejun Heo689665a2013-01-09 08:05:13 -08001582static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1583{
1584 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1585
1586 cfqg_stats_reset(&cfqg->stats);
Tejun Heo0b399202013-01-09 08:05:13 -08001587 cfqg_stats_reset(&cfqg->dead_stats);
Vivek Goyal25fb5162009-12-03 12:59:46 -05001588}
1589
1590/*
Vivek Goyal3e59cf92011-05-19 15:38:21 -04001591 * Search for the cfq group the current task belongs to. The request_queue
 1592 * lock must be held.
Vivek Goyal25fb5162009-12-03 12:59:46 -05001593 */
Tejun Heocd1604f2012-03-05 13:15:06 -08001594static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
Tejun Heo3c798392012-04-16 13:57:25 -07001595 struct blkcg *blkcg)
Vivek Goyal25fb5162009-12-03 12:59:46 -05001596{
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001597 struct request_queue *q = cfqd->queue;
Tejun Heocd1604f2012-03-05 13:15:06 -08001598 struct cfq_group *cfqg = NULL;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001599
Tejun Heo3c798392012-04-16 13:57:25 -07001600 /* avoid lookup for the common case where there's no blkcg */
1601 if (blkcg == &blkcg_root) {
Tejun Heocd1604f2012-03-05 13:15:06 -08001602 cfqg = cfqd->root_group;
1603 } else {
Tejun Heo3c798392012-04-16 13:57:25 -07001604 struct blkcg_gq *blkg;
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001605
Tejun Heo3c96cb32012-04-13 13:11:34 -07001606 blkg = blkg_lookup_create(blkcg, q);
Tejun Heocd1604f2012-03-05 13:15:06 -08001607 if (!IS_ERR(blkg))
Tejun Heo03814112012-03-05 13:15:14 -08001608 cfqg = blkg_to_cfqg(blkg);
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001609 }
1610
Vivek Goyal25fb5162009-12-03 12:59:46 -05001611 return cfqg;
1612}
1613
1614static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1615{
1616 /* Currently, all async queues are mapped to root group */
1617 if (!cfq_cfqq_sync(cfqq))
Tejun Heof51b8022012-03-05 13:15:05 -08001618 cfqg = cfqq->cfqd->root_group;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001619
1620 cfqq->cfqg = cfqg;
Vivek Goyalb1c35762009-12-03 12:59:47 -05001621 /* cfqq reference on cfqg */
Tejun Heoeb7d8c072012-03-23 14:02:53 +01001622 cfqg_get(cfqg);
Vivek Goyalb1c35762009-12-03 12:59:47 -05001623}
1624
Tejun Heof95a04a2012-04-16 13:57:26 -07001625static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1626 struct blkg_policy_data *pd, int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001627{
Tejun Heof95a04a2012-04-16 13:57:26 -07001628 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo3381cb82012-04-01 14:38:44 -07001629
1630 if (!cfqg->dev_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001631 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001632 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001633}
1634
Tejun Heo182446d2013-08-08 20:11:24 -04001635static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
1636 struct cftype *cft, struct seq_file *sf)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001637{
Tejun Heo182446d2013-08-08 20:11:24 -04001638 blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
1639 &blkcg_policy_cfq, 0, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001640 return 0;
1641}
1642
Tejun Heoe71357e2013-01-09 08:05:10 -08001643static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1644 struct blkg_policy_data *pd, int off)
1645{
1646 struct cfq_group *cfqg = pd_to_cfqg(pd);
1647
1648 if (!cfqg->dev_leaf_weight)
1649 return 0;
1650 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1651}
1652
Tejun Heo182446d2013-08-08 20:11:24 -04001653static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
Tejun Heoe71357e2013-01-09 08:05:10 -08001654 struct cftype *cft,
1655 struct seq_file *sf)
1656{
Tejun Heo182446d2013-08-08 20:11:24 -04001657 blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
1658 &blkcg_policy_cfq, 0, false);
Tejun Heoe71357e2013-01-09 08:05:10 -08001659 return 0;
1660}
1661
Tejun Heo182446d2013-08-08 20:11:24 -04001662static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
Tejun Heo3381cb82012-04-01 14:38:44 -07001663 struct seq_file *sf)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001664{
Tejun Heo182446d2013-08-08 20:11:24 -04001665 seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001666 return 0;
1667}
1668
Tejun Heo182446d2013-08-08 20:11:24 -04001669static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
1670 struct cftype *cft, struct seq_file *sf)
Tejun Heoe71357e2013-01-09 08:05:10 -08001671{
Tejun Heo182446d2013-08-08 20:11:24 -04001672 seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
Tejun Heoe71357e2013-01-09 08:05:10 -08001673 return 0;
1674}
1675
Tejun Heo182446d2013-08-08 20:11:24 -04001676static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
1677 struct cftype *cft, const char *buf,
1678 bool is_leaf_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001679{
Tejun Heo182446d2013-08-08 20:11:24 -04001680 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001681 struct blkg_conf_ctx ctx;
Tejun Heo3381cb82012-04-01 14:38:44 -07001682 struct cfq_group *cfqg;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001683 int ret;
1684
Tejun Heo3c798392012-04-16 13:57:25 -07001685 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001686 if (ret)
1687 return ret;
1688
1689 ret = -EINVAL;
Tejun Heo3381cb82012-04-01 14:38:44 -07001690 cfqg = blkg_to_cfqg(ctx.blkg);
Tejun Heoa2b16932012-04-13 13:11:33 -07001691 if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
Tejun Heoe71357e2013-01-09 08:05:10 -08001692 if (!is_leaf_weight) {
1693 cfqg->dev_weight = ctx.v;
1694 cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1695 } else {
1696 cfqg->dev_leaf_weight = ctx.v;
1697 cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1698 }
Tejun Heo60c2bc22012-04-01 14:38:43 -07001699 ret = 0;
1700 }
1701
1702 blkg_conf_finish(&ctx);
1703 return ret;
1704}
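
/*
 * Example usage (assuming the blkio controller is mounted and this is
 * a non-root cgroup):
 *
 *	echo "8:16 600" > blkio.weight_device
 *
 * gives device 8:16 a weight of 600 in this group; writing a weight
 * of 0 clears the per-device override so the group-wide weight
 * applies again.
 */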
1705
Tejun Heo182446d2013-08-08 20:11:24 -04001706static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
1707 struct cftype *cft, const char *buf)
Tejun Heoe71357e2013-01-09 08:05:10 -08001708{
Tejun Heo182446d2013-08-08 20:11:24 -04001709 return __cfqg_set_weight_device(css, cft, buf, false);
Tejun Heoe71357e2013-01-09 08:05:10 -08001710}
1711
Tejun Heo182446d2013-08-08 20:11:24 -04001712static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
1713 struct cftype *cft, const char *buf)
Tejun Heoe71357e2013-01-09 08:05:10 -08001714{
Tejun Heo182446d2013-08-08 20:11:24 -04001715 return __cfqg_set_weight_device(css, cft, buf, true);
Tejun Heoe71357e2013-01-09 08:05:10 -08001716}
1717
Tejun Heo182446d2013-08-08 20:11:24 -04001718static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1719 u64 val, bool is_leaf_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001720{
Tejun Heo182446d2013-08-08 20:11:24 -04001721 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo3c798392012-04-16 13:57:25 -07001722 struct blkcg_gq *blkg;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001723
Tejun Heo3381cb82012-04-01 14:38:44 -07001724 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001725 return -EINVAL;
1726
1727 spin_lock_irq(&blkcg->lock);
Tejun Heoe71357e2013-01-09 08:05:10 -08001728
1729 if (!is_leaf_weight)
1730 blkcg->cfq_weight = val;
1731 else
1732 blkcg->cfq_leaf_weight = val;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001733
Sasha Levinb67bfe02013-02-27 17:06:00 -08001734 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
Tejun Heo3381cb82012-04-01 14:38:44 -07001735 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001736
Tejun Heoe71357e2013-01-09 08:05:10 -08001737 if (!cfqg)
1738 continue;
1739
1740 if (!is_leaf_weight) {
1741 if (!cfqg->dev_weight)
1742 cfqg->new_weight = blkcg->cfq_weight;
1743 } else {
1744 if (!cfqg->dev_leaf_weight)
1745 cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1746 }
Tejun Heo60c2bc22012-04-01 14:38:43 -07001747 }
1748
1749 spin_unlock_irq(&blkcg->lock);
1750 return 0;
1751}
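
/*
 * Example: `echo 500 > blkio.weight` updates the default for every
 * cfqg in the cgroup, but only where no per-device override is set
 * (dev_weight/dev_leaf_weight of 0); overridden devices keep their
 * configured value.
 */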
1752
Tejun Heo182446d2013-08-08 20:11:24 -04001753static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1754 u64 val)
Tejun Heoe71357e2013-01-09 08:05:10 -08001755{
Tejun Heo182446d2013-08-08 20:11:24 -04001756 return __cfq_set_weight(css, cft, val, false);
Tejun Heoe71357e2013-01-09 08:05:10 -08001757}
1758
Tejun Heo182446d2013-08-08 20:11:24 -04001759static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1760 struct cftype *cft, u64 val)
Tejun Heoe71357e2013-01-09 08:05:10 -08001761{
Tejun Heo182446d2013-08-08 20:11:24 -04001762 return __cfq_set_weight(css, cft, val, true);
Tejun Heoe71357e2013-01-09 08:05:10 -08001763}
1764
Tejun Heo182446d2013-08-08 20:11:24 -04001765static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001766 struct seq_file *sf)
1767{
Tejun Heo182446d2013-08-08 20:11:24 -04001768 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001769
Tejun Heo3c798392012-04-16 13:57:25 -07001770 blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001771 cft->private, false);
1772 return 0;
1773}
1774
Tejun Heo182446d2013-08-08 20:11:24 -04001775static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
1776 struct cftype *cft, struct seq_file *sf)
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001777{
Tejun Heo182446d2013-08-08 20:11:24 -04001778 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001779
Tejun Heo3c798392012-04-16 13:57:25 -07001780 blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001781 cft->private, true);
1782 return 0;
1783}
1784
Tejun Heo43114012013-01-09 08:05:13 -08001785static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1786 struct blkg_policy_data *pd, int off)
1787{
1788 u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1789
1790 return __blkg_prfill_u64(sf, pd, sum);
1791}
1792
1793static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1794 struct blkg_policy_data *pd, int off)
1795{
1796 struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1797
1798 return __blkg_prfill_rwstat(sf, pd, &sum);
1799}
1800
Tejun Heo182446d2013-08-08 20:11:24 -04001801static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
1802 struct cftype *cft, struct seq_file *sf)
Tejun Heo43114012013-01-09 08:05:13 -08001803{
Tejun Heo182446d2013-08-08 20:11:24 -04001804 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo43114012013-01-09 08:05:13 -08001805
1806 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
1807 &blkcg_policy_cfq, cft->private, false);
1808 return 0;
1809}
1810
Tejun Heo182446d2013-08-08 20:11:24 -04001811static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
1812 struct cftype *cft, struct seq_file *sf)
Tejun Heo43114012013-01-09 08:05:13 -08001813{
Tejun Heo182446d2013-08-08 20:11:24 -04001814 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo43114012013-01-09 08:05:13 -08001815
1816 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
1817 &blkcg_policy_cfq, cft->private, true);
1818 return 0;
1819}
1820
Tejun Heo60c2bc22012-04-01 14:38:43 -07001821#ifdef CONFIG_DEBUG_BLK_CGROUP
Tejun Heof95a04a2012-04-16 13:57:26 -07001822static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1823 struct blkg_policy_data *pd, int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001824{
Tejun Heof95a04a2012-04-16 13:57:26 -07001825 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo155fead2012-04-01 14:38:44 -07001826 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001827 u64 v = 0;
1828
1829 if (samples) {
Tejun Heo155fead2012-04-01 14:38:44 -07001830 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
Anatol Pomozovf3cff252013-09-22 12:43:47 -06001831 v = div64_u64(v, samples);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001832 }
Tejun Heof95a04a2012-04-16 13:57:26 -07001833 __blkg_prfill_u64(sf, pd, v);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001834 return 0;
1835}
1836
1837/* print avg_queue_size */
Tejun Heo182446d2013-08-08 20:11:24 -04001838static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
1839 struct cftype *cft, struct seq_file *sf)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001840{
Tejun Heo182446d2013-08-08 20:11:24 -04001841 struct blkcg *blkcg = css_to_blkcg(css);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001842
Tejun Heo155fead2012-04-01 14:38:44 -07001843 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
Tejun Heo3c798392012-04-16 13:57:25 -07001844 &blkcg_policy_cfq, 0, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001845 return 0;
1846}
1847#endif /* CONFIG_DEBUG_BLK_CGROUP */
1848
1849static struct cftype cfq_blkcg_files[] = {
Tejun Heo1d3650f2013-01-09 08:05:11 -08001850 /* on root, weight is mapped to leaf_weight */
Tejun Heo60c2bc22012-04-01 14:38:43 -07001851 {
1852 .name = "weight_device",
Tejun Heo1d3650f2013-01-09 08:05:11 -08001853 .flags = CFTYPE_ONLY_ON_ROOT,
1854 .read_seq_string = cfqg_print_leaf_weight_device,
1855 .write_string = cfqg_set_leaf_weight_device,
1856 .max_write_len = 256,
1857 },
1858 {
1859 .name = "weight",
1860 .flags = CFTYPE_ONLY_ON_ROOT,
1861 .read_seq_string = cfq_print_leaf_weight,
1862 .write_u64 = cfq_set_leaf_weight,
1863 },
1864
1865 /* no such mapping necessary for !roots */
1866 {
1867 .name = "weight_device",
1868 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo3381cb82012-04-01 14:38:44 -07001869 .read_seq_string = cfqg_print_weight_device,
1870 .write_string = cfqg_set_weight_device,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001871 .max_write_len = 256,
1872 },
1873 {
1874 .name = "weight",
Tejun Heoe71357e2013-01-09 08:05:10 -08001875 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo3381cb82012-04-01 14:38:44 -07001876 .read_seq_string = cfq_print_weight,
1877 .write_u64 = cfq_set_weight,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001878 },
Tejun Heo1d3650f2013-01-09 08:05:11 -08001879
1880 {
1881 .name = "leaf_weight_device",
Tejun Heoe71357e2013-01-09 08:05:10 -08001882 .read_seq_string = cfqg_print_leaf_weight_device,
1883 .write_string = cfqg_set_leaf_weight_device,
1884 .max_write_len = 256,
1885 },
1886 {
1887 .name = "leaf_weight",
Tejun Heoe71357e2013-01-09 08:05:10 -08001888 .read_seq_string = cfq_print_leaf_weight,
1889 .write_u64 = cfq_set_leaf_weight,
1890 },
1891
Tejun Heo43114012013-01-09 08:05:13 -08001892	/* statistics, covering only the tasks in the cfqg */
Tejun Heo60c2bc22012-04-01 14:38:43 -07001893 {
1894 .name = "time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001895 .private = offsetof(struct cfq_group, stats.time),
1896 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001897 },
1898 {
1899 .name = "sectors",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001900 .private = offsetof(struct cfq_group, stats.sectors),
1901 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001902 },
1903 {
1904 .name = "io_service_bytes",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001905 .private = offsetof(struct cfq_group, stats.service_bytes),
1906 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001907 },
1908 {
1909 .name = "io_serviced",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001910 .private = offsetof(struct cfq_group, stats.serviced),
1911 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001912 },
1913 {
1914 .name = "io_service_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001915 .private = offsetof(struct cfq_group, stats.service_time),
1916 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001917 },
1918 {
1919 .name = "io_wait_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001920 .private = offsetof(struct cfq_group, stats.wait_time),
1921 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001922 },
1923 {
1924 .name = "io_merged",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001925 .private = offsetof(struct cfq_group, stats.merged),
1926 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001927 },
1928 {
1929 .name = "io_queued",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001930 .private = offsetof(struct cfq_group, stats.queued),
1931 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001932 },
Tejun Heo43114012013-01-09 08:05:13 -08001933
1934	/* the same statistics, covering the cfqg and its descendants */
1935 {
1936 .name = "time_recursive",
1937 .private = offsetof(struct cfq_group, stats.time),
1938 .read_seq_string = cfqg_print_stat_recursive,
1939 },
1940 {
1941 .name = "sectors_recursive",
1942 .private = offsetof(struct cfq_group, stats.sectors),
1943 .read_seq_string = cfqg_print_stat_recursive,
1944 },
1945 {
1946 .name = "io_service_bytes_recursive",
1947 .private = offsetof(struct cfq_group, stats.service_bytes),
1948 .read_seq_string = cfqg_print_rwstat_recursive,
1949 },
1950 {
1951 .name = "io_serviced_recursive",
1952 .private = offsetof(struct cfq_group, stats.serviced),
1953 .read_seq_string = cfqg_print_rwstat_recursive,
1954 },
1955 {
1956 .name = "io_service_time_recursive",
1957 .private = offsetof(struct cfq_group, stats.service_time),
1958 .read_seq_string = cfqg_print_rwstat_recursive,
1959 },
1960 {
1961 .name = "io_wait_time_recursive",
1962 .private = offsetof(struct cfq_group, stats.wait_time),
1963 .read_seq_string = cfqg_print_rwstat_recursive,
1964 },
1965 {
1966 .name = "io_merged_recursive",
1967 .private = offsetof(struct cfq_group, stats.merged),
1968 .read_seq_string = cfqg_print_rwstat_recursive,
1969 },
1970 {
1971 .name = "io_queued_recursive",
1972 .private = offsetof(struct cfq_group, stats.queued),
1973 .read_seq_string = cfqg_print_rwstat_recursive,
1974 },
Tejun Heo60c2bc22012-04-01 14:38:43 -07001975#ifdef CONFIG_DEBUG_BLK_CGROUP
1976 {
1977 .name = "avg_queue_size",
Tejun Heo155fead2012-04-01 14:38:44 -07001978 .read_seq_string = cfqg_print_avg_queue_size,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001979 },
1980 {
1981 .name = "group_wait_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001982 .private = offsetof(struct cfq_group, stats.group_wait_time),
1983 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001984 },
1985 {
1986 .name = "idle_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001987 .private = offsetof(struct cfq_group, stats.idle_time),
1988 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001989 },
1990 {
1991 .name = "empty_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001992 .private = offsetof(struct cfq_group, stats.empty_time),
1993 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001994 },
1995 {
1996 .name = "dequeue",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001997 .private = offsetof(struct cfq_group, stats.dequeue),
1998 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001999 },
2000 {
2001 .name = "unaccounted_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07002002 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2003 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07002004 },
2005#endif /* CONFIG_DEBUG_BLK_CGROUP */
2006 { } /* terminate */
2007};
Vivek Goyal25fb5162009-12-03 12:59:46 -05002008#else /* GROUP_IOSCHED */
Tejun Heocd1604f2012-03-05 13:15:06 -08002009static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
Tejun Heo3c798392012-04-16 13:57:25 -07002010 struct blkcg *blkcg)
Vivek Goyal25fb5162009-12-03 12:59:46 -05002011{
Tejun Heof51b8022012-03-05 13:15:05 -08002012 return cfqd->root_group;
Vivek Goyal25fb5162009-12-03 12:59:46 -05002013}
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02002014
Vivek Goyal25fb5162009-12-03 12:59:46 -05002015static inline void
2016cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2017 cfqq->cfqg = cfqg;
2018}
2019
2020#endif /* GROUP_IOSCHED */
2021
Jens Axboe498d3aa22007-04-26 12:54:48 +02002022/*
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002023 * The cfqd->service_trees hold all pending cfq_queues that have
Jens Axboe498d3aa22007-04-26 12:54:48 +02002024 * requests waiting to be processed. They are sorted in the order in
 2025 * which we will service the queues.
2026 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002027static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboea6151c32009-10-07 20:02:57 +02002028 bool add_front)
Jens Axboed9e76202007-04-20 14:27:50 +02002029{
Jens Axboe08717142008-01-28 11:38:15 +01002030 struct rb_node **p, *parent;
2031 struct cfq_queue *__cfqq;
Jens Axboed9e76202007-04-20 14:27:50 +02002032 unsigned long rb_key;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002033 struct cfq_rb_root *st;
Jens Axboe498d3aa22007-04-26 12:54:48 +02002034 int left;
Vivek Goyaldae739e2009-12-03 12:59:45 -05002035 int new_cfqq = 1;
Vivek Goyalae30c282009-12-03 12:59:55 -05002036
Vivek Goyal34b98d02012-10-03 16:56:58 -04002037 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
Jens Axboe08717142008-01-28 11:38:15 +01002038 if (cfq_class_idle(cfqq)) {
2039 rb_key = CFQ_IDLE_DELAY;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002040 parent = rb_last(&st->rb);
Jens Axboe08717142008-01-28 11:38:15 +01002041 if (parent && parent != &cfqq->rb_node) {
2042 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2043 rb_key += __cfqq->rb_key;
2044 } else
2045 rb_key += jiffies;
2046 } else if (!add_front) {
Jens Axboeb9c89462009-10-06 20:53:44 +02002047 /*
2048 * Get our rb key offset. Subtract any residual slice
2049 * value carried from last service. A negative resid
2050 * count indicates slice overrun, and this should position
2051 * the next service time further away in the tree.
2052 */
Jens Axboeedd75ff2007-04-19 12:03:34 +02002053 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
Jens Axboeb9c89462009-10-06 20:53:44 +02002054 rb_key -= cfqq->slice_resid;
Jens Axboeedd75ff2007-04-19 12:03:34 +02002055 cfqq->slice_resid = 0;
Corrado Zoccolo48e025e2009-10-05 08:49:23 +02002056 } else {
2057 rb_key = -HZ;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002058 __cfqq = cfq_rb_first(st);
Corrado Zoccolo48e025e2009-10-05 08:49:23 +02002059 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2060 }
Jens Axboed9e76202007-04-20 14:27:50 +02002061
2062 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
Vivek Goyaldae739e2009-12-03 12:59:45 -05002063 new_cfqq = 0;
Jens Axboe99f96282007-02-05 11:56:25 +01002064 /*
Jens Axboed9e76202007-04-20 14:27:50 +02002065 * same position, nothing more to do
Jens Axboe99f96282007-02-05 11:56:25 +01002066 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002067 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
Jens Axboed9e76202007-04-20 14:27:50 +02002068 return;
Jens Axboe53b037442006-07-28 09:48:51 +02002069
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01002070 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2071 cfqq->service_tree = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02002072 }
Jens Axboed9e76202007-04-20 14:27:50 +02002073
Jens Axboe498d3aa22007-04-26 12:54:48 +02002074 left = 1;
Jens Axboe08717142008-01-28 11:38:15 +01002075 parent = NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002076 cfqq->service_tree = st;
2077 p = &st->rb.rb_node;
Jens Axboed9e76202007-04-20 14:27:50 +02002078 while (*p) {
2079 parent = *p;
2080 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2081
Jens Axboe0c534e02007-04-18 20:01:57 +02002082 /*
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002083 * sort by key, that represents service time.
Jens Axboe0c534e02007-04-18 20:01:57 +02002084 */
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002085 if (time_before(rb_key, __cfqq->rb_key))
Vivek Goyal1f23f122012-10-03 16:57:00 -04002086 p = &parent->rb_left;
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002087 else {
Vivek Goyal1f23f122012-10-03 16:57:00 -04002088 p = &parent->rb_right;
Jens Axboecc09e292007-04-26 12:53:50 +02002089 left = 0;
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002090 }
Jens Axboed9e76202007-04-20 14:27:50 +02002091 }
2092
Jens Axboecc09e292007-04-26 12:53:50 +02002093 if (left)
Vivek Goyal34b98d02012-10-03 16:56:58 -04002094 st->left = &cfqq->rb_node;
Jens Axboecc09e292007-04-26 12:53:50 +02002095
Jens Axboed9e76202007-04-20 14:27:50 +02002096 cfqq->rb_key = rb_key;
2097 rb_link_node(&cfqq->rb_node, parent, p);
Vivek Goyal34b98d02012-10-03 16:56:58 -04002098 rb_insert_color(&cfqq->rb_node, &st->rb);
2099 st->count++;
Namhyung Kim20359f22011-05-24 10:23:22 +02002100 if (add_front || !new_cfqq)
Vivek Goyaldae739e2009-12-03 12:59:45 -05002101 return;
Justin TerAvest8184f932011-03-17 16:12:36 +01002102 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103}
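
/*
 * Illustrative rb_key arithmetic for the common (non-idle, non-front)
 * path above, with made-up numbers: at jiffies=1000, a slice offset
 * of 20 and slice_resid=-5 (the queue overran its last slice by 5)
 * yield rb_key = 1000 + 20 - (-5) = 1025, pushing the next service
 * slightly further out in the tree.
 */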
2104
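/*
 * Look up the queue whose next_rq starts at @sector. Returns it on an
 * exact match; otherwise returns NULL, with *ret_parent and *rb_link
 * set up so the caller can insert a new node at the right spot.
 */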
Jens Axboea36e71f2009-04-15 12:15:11 +02002105static struct cfq_queue *
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002106cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2107 sector_t sector, struct rb_node **ret_parent,
2108 struct rb_node ***rb_link)
Jens Axboea36e71f2009-04-15 12:15:11 +02002109{
Jens Axboea36e71f2009-04-15 12:15:11 +02002110 struct rb_node **p, *parent;
2111 struct cfq_queue *cfqq = NULL;
2112
2113 parent = NULL;
2114 p = &root->rb_node;
2115 while (*p) {
2116 struct rb_node **n;
2117
2118 parent = *p;
2119 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2120
2121 /*
2122 * Sort strictly based on sector. Smallest to the left,
2123 * largest to the right.
2124 */
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002125 if (sector > blk_rq_pos(cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002126 n = &(*p)->rb_right;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002127 else if (sector < blk_rq_pos(cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002128 n = &(*p)->rb_left;
2129 else
2130 break;
2131 p = n;
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002132 cfqq = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02002133 }
2134
2135 *ret_parent = parent;
2136 if (rb_link)
2137 *rb_link = p;
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002138 return cfqq;
Jens Axboea36e71f2009-04-15 12:15:11 +02002139}
2140
2141static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2142{
Jens Axboea36e71f2009-04-15 12:15:11 +02002143 struct rb_node **p, *parent;
2144 struct cfq_queue *__cfqq;
2145
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002146 if (cfqq->p_root) {
2147 rb_erase(&cfqq->p_node, cfqq->p_root);
2148 cfqq->p_root = NULL;
2149 }
Jens Axboea36e71f2009-04-15 12:15:11 +02002150
2151 if (cfq_class_idle(cfqq))
2152 return;
2153 if (!cfqq->next_rq)
2154 return;
2155
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002156 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002157 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2158 blk_rq_pos(cfqq->next_rq), &parent, &p);
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002159 if (!__cfqq) {
2160 rb_link_node(&cfqq->p_node, parent, p);
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002161 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2162 } else
2163 cfqq->p_root = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02002164}
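
/*
 * Note the collision case above: if another queue's next_rq already
 * occupies this sector in the prio tree, the queue is simply left out
 * (p_root stays NULL) rather than inserted as a duplicate.
 */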
2165
Jens Axboe498d3aa22007-04-26 12:54:48 +02002166/*
2167 * Update cfqq's position in the service tree.
2168 */
Jens Axboeedd75ff2007-04-19 12:03:34 +02002169static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002170{
Jens Axboe6d048f52007-04-25 12:44:27 +02002171 /*
2172 * Resorting requires the cfqq to be on the RR list already.
2173 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002174 if (cfq_cfqq_on_rr(cfqq)) {
Jens Axboeedd75ff2007-04-19 12:03:34 +02002175 cfq_service_tree_add(cfqd, cfqq, 0);
Jens Axboea36e71f2009-04-15 12:15:11 +02002176 cfq_prio_tree_add(cfqd, cfqq);
2177 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002178}
2179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180/*
2181 * Add to the busy list of queues for service, trying to be fair by
Jens Axboe22e2c502005-06-27 10:55:12 +02002182 * ordering the pending list according to the last request service time.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 */
Jens Axboefebffd62008-01-28 13:19:43 +01002184static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185{
Jens Axboe7b679132008-05-30 12:23:07 +02002186 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
Jens Axboe3b181522005-06-27 10:56:24 +02002187 BUG_ON(cfq_cfqq_on_rr(cfqq));
2188 cfq_mark_cfqq_on_rr(cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 cfqd->busy_queues++;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002190 if (cfq_cfqq_sync(cfqq))
2191 cfqd->busy_sync_queues++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Jens Axboeedd75ff2007-04-19 12:03:34 +02002193 cfq_resort_rr_list(cfqd, cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194}
2195
Jens Axboe498d3aa22007-04-26 12:54:48 +02002196/*
2197 * Called when the cfqq no longer has requests pending, remove it from
2198 * the service tree.
2199 */
Jens Axboefebffd62008-01-28 13:19:43 +01002200static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
Jens Axboe7b679132008-05-30 12:23:07 +02002202 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
Jens Axboe3b181522005-06-27 10:56:24 +02002203 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2204 cfq_clear_cfqq_on_rr(cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01002206 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2207 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2208 cfqq->service_tree = NULL;
2209 }
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002210 if (cfqq->p_root) {
2211 rb_erase(&cfqq->p_node, cfqq->p_root);
2212 cfqq->p_root = NULL;
2213 }
Jens Axboed9e76202007-04-20 14:27:50 +02002214
Justin TerAvest8184f932011-03-17 16:12:36 +01002215 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 BUG_ON(!cfqd->busy_queues);
2217 cfqd->busy_queues--;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002218 if (cfq_cfqq_sync(cfqq))
2219 cfqd->busy_sync_queues--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220}
2221
2222/*
2223 * rb tree support functions
2224 */
Jens Axboefebffd62008-01-28 13:19:43 +01002225static void cfq_del_rq_rb(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226{
Jens Axboe5e705372006-07-13 12:39:25 +02002227 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe5e705372006-07-13 12:39:25 +02002228 const int sync = rq_is_sync(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
Jens Axboeb4878f22005-10-20 16:42:29 +02002230 BUG_ON(!cfqq->queued[sync]);
2231 cfqq->queued[sync]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
Jens Axboe5e705372006-07-13 12:39:25 +02002233 elv_rb_del(&cfqq->sort_list, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
Vivek Goyalf04a6422009-12-03 12:59:40 -05002235 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2236 /*
2237 * Queue will be deleted from service tree when we actually
2238 * expire it later. Right now just remove it from prio tree
2239 * as it is empty.
2240 */
2241 if (cfqq->p_root) {
2242 rb_erase(&cfqq->p_node, cfqq->p_root);
2243 cfqq->p_root = NULL;
2244 }
2245 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247
Jens Axboe5e705372006-07-13 12:39:25 +02002248static void cfq_add_rq_rb(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249{
Jens Axboe5e705372006-07-13 12:39:25 +02002250 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 struct cfq_data *cfqd = cfqq->cfqd;
Jeff Moyer796d5112011-06-02 21:19:05 +02002252 struct request *prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Jens Axboe5380a102006-07-13 12:37:56 +02002254 cfqq->queued[rq_is_sync(rq)]++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
Jeff Moyer796d5112011-06-02 21:19:05 +02002256 elv_rb_add(&cfqq->sort_list, rq);
Jens Axboe5fccbf62006-10-31 14:21:55 +01002257
2258 if (!cfq_cfqq_on_rr(cfqq))
2259 cfq_add_cfqq_rr(cfqd, cfqq);
Jens Axboe5044eed2007-04-25 11:53:48 +02002260
2261 /*
2262 * check if this request is a better next-serve candidate
2263 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002264 prev = cfqq->next_rq;
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002265 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
Jens Axboea36e71f2009-04-15 12:15:11 +02002266
2267 /*
2268 * adjust priority tree position, if ->next_rq changes
2269 */
2270 if (prev != cfqq->next_rq)
2271 cfq_prio_tree_add(cfqd, cfqq);
2272
Jens Axboe5044eed2007-04-25 11:53:48 +02002273 BUG_ON(!cfqq->next_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274}
2275
Jens Axboefebffd62008-01-28 13:19:43 +01002276static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277{
Jens Axboe5380a102006-07-13 12:37:56 +02002278 elv_rb_del(&cfqq->sort_list, rq);
2279 cfqq->queued[rq_is_sync(rq)]--;
Tejun Heo155fead2012-04-01 14:38:44 -07002280 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
Jens Axboe5e705372006-07-13 12:39:25 +02002281 cfq_add_rq_rb(rq);
Tejun Heo155fead2012-04-01 14:38:44 -07002282 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2283 rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284}
2285
Jens Axboe206dc692006-03-28 13:03:44 +02002286static struct request *
2287cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288{
Jens Axboe206dc692006-03-28 13:03:44 +02002289 struct task_struct *tsk = current;
Tejun Heoc5869802011-12-14 00:33:41 +01002290 struct cfq_io_cq *cic;
Jens Axboe206dc692006-03-28 13:03:44 +02002291 struct cfq_queue *cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
Jens Axboe4ac845a2008-01-24 08:44:49 +01002293 cic = cfq_cic_lookup(cfqd, tsk->io_context);
Vasily Tarasov91fac312007-04-25 12:29:51 +02002294 if (!cic)
2295 return NULL;
2296
2297 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
Kent Overstreetf73a1c72012-09-25 15:05:12 -07002298 if (cfqq)
2299 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 return NULL;
2302}
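
/*
 * Note: the lookup above keys on bio_end_sector(), i.e. it finds a
 * queued request starting exactly where @bio ends -- the candidate
 * for an ELEVATOR_FRONT_MERGE in cfq_merge() below.
 */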
2303
Jens Axboe165125e2007-07-24 09:28:11 +02002304static void cfq_activate_request(struct request_queue *q, struct request *rq)
Jens Axboeb4878f22005-10-20 16:42:29 +02002305{
2306 struct cfq_data *cfqd = q->elevator->elevator_data;
2307
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002308 cfqd->rq_in_driver++;
Jens Axboe7b679132008-05-30 12:23:07 +02002309 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002310 cfqd->rq_in_driver);
Jens Axboe25776e32006-06-01 10:12:26 +02002311
Tejun Heo5b936292009-05-07 22:24:38 +09002312 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02002313}
2314
Jens Axboe165125e2007-07-24 09:28:11 +02002315static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316{
Jens Axboe22e2c502005-06-27 10:55:12 +02002317 struct cfq_data *cfqd = q->elevator->elevator_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002319 WARN_ON(!cfqd->rq_in_driver);
2320 cfqd->rq_in_driver--;
Jens Axboe7b679132008-05-30 12:23:07 +02002321 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002322 cfqd->rq_in_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323}
2324
Jens Axboeb4878f22005-10-20 16:42:29 +02002325static void cfq_remove_request(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326{
Jens Axboe5e705372006-07-13 12:39:25 +02002327 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe21183b02006-07-13 12:33:14 +02002328
Jens Axboe5e705372006-07-13 12:39:25 +02002329 if (cfqq->next_rq == rq)
2330 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331
Jens Axboeb4878f22005-10-20 16:42:29 +02002332 list_del_init(&rq->queuelist);
Jens Axboe5e705372006-07-13 12:39:25 +02002333 cfq_del_rq_rb(rq);
Jens Axboe374f84a2006-07-23 01:42:19 +02002334
Aaron Carroll45333d52008-08-26 15:52:36 +02002335 cfqq->cfqd->rq_queued--;
Tejun Heo155fead2012-04-01 14:38:44 -07002336 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
Christoph Hellwig65299a32011-08-23 14:50:29 +02002337 if (rq->cmd_flags & REQ_PRIO) {
2338 WARN_ON(!cfqq->prio_pending);
2339 cfqq->prio_pending--;
Jens Axboeb53d1ed2011-08-19 08:34:48 +02002340 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341}
2342
static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
	    cfqq == RQ_CFQQ(next)) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Look up the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfqg_stats_update_idle_time(cfqq->cfqg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
				cfqd->serving_wl_class, cfqd->serving_wl_type);
		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance. If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->icq.ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = st_for(cfqd->serving_group,
			cfqd->serving_wl_class, cfqd->serving_wl_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!st)
		return NULL;
	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	return cfq_rb_first(st);
}

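/*
 * Used only by forced dispatch: bypass workload selection and return
 * the first queue found on any service tree of the next group.
 */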
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

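/*
 * Absolute distance, in sectors, between a request and the head
 * position implied by the last dispatched request. This is the metric
 * behind the close-cooperator heuristics below.
 */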
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */

static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_class_t wl_class = cfqq_class(cfqq);
	struct cfq_rb_root *st = cfqq->service_tree;

	BUG_ON(!st);
	BUG_ON(!st->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (wl_class == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
	return false;
}

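/*
 * Arm the idle timer for the active queue: keep the device idle for a
 * short while in anticipation of a new request from the same queue, or,
 * when per-queue idling is not warranted, from the same group.
 */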
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_cq *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime.ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
			     cic->ttime.ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfqg_stats_set_start_idle_time(cfqq->cfqg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

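/*
 * Per-slice dispatch budget for async queues, scaled by priority. For
 * example, with the default cfq_slice_async_rq of 2, an ioprio-4 BE
 * queue gets 2 * 2 * (8 - 4) = 16 requests per round, an ioprio-0
 * queue 32.
 */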
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

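/*
 * Schedule a merge of two cooperating queues by linking them through
 * ->new_cfqq and transferring process references. The direction is
 * chosen so that the queue with fewer process references merges into
 * the busier one.
 */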
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}

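/*
 * Pick the workload type (ASYNC, SYNC_NOIDLE, SYNC) to serve next
 * within a class: the type whose first pending queue has the lowest
 * rb_key, i.e. roughly the one that has been waiting longest.
 */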
static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
			struct cfq_group *cfqg, enum wl_class_t wl_class)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
		}
	}

	return cur_best;
}

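/*
 * Worked example for the slice computation below, assuming the default
 * 300ms target latency: if the group's slice is 300ms and the chosen
 * workload tree holds 2 of the class's 4 busy queues, the workload
 * slice comes out to about 300 * 2 / 4 = 150ms, before the async/sync
 * scaling is applied.
 */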
static void
choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_class_t original_class = cfqd->serving_wl_class;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_wl_class = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_wl_class = BE_WORKLOAD;
	else {
		cfqd->serving_wl_class = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_class != cfqd->serving_wl_class)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
					cfqd->serving_wl_class);
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
					cfqg));

	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues with-in same group will lead to higher
		 * async ratio system wide as generally root group is going
		 * to have higher weight. A more accurate thing would be to
		 * calculate system wide async/sync ratio.
		 */
		tmp = cfqd->cfq_target_latency *
			cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_wl_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
		cfqd->serving_wl_type = cfqg->saved_wl_type;
		cfqd->serving_wl_class = cfqg->saved_wl_class;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_wl_class_and_type(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice means we have
		 * been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	     (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

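/*
 * Estimate whether the slice is about to run out: consider it nearly
 * used up once the remaining slice is shorter than one idle period per
 * request still in flight, i.e. once
 * jiffies + cfq_slice_idle * dispatched reaches slice_end.
 */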
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
		return true;

	return false;
}

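/*
 * Decide whether cfqq may dispatch another request right now. Sync and
 * async traffic are kept from interleaving, and the dispatch depth is
 * normally capped at cfq_quantum/2, ramping up to cfq_quantum early in
 * the slice and becoming unlimited for a sole busy queue.
 */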
Jens Axboe0b182d62009-10-06 20:49:37 +02003205static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe2f5cb732009-04-07 08:51:19 +02003206{
Jens Axboe2f5cb732009-04-07 08:51:19 +02003207 unsigned int max_dispatch;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208
Jens Axboe2f5cb732009-04-07 08:51:19 +02003209 /*
Jens Axboe5ad531d2009-07-03 12:57:48 +02003210 * Drain async requests before we start sync IO
3211 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003212 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
Jens Axboe0b182d62009-10-06 20:49:37 +02003213 return false;
Jens Axboe5ad531d2009-07-03 12:57:48 +02003214
3215 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02003216 * If this is an async queue and we have sync IO in flight, let it wait
3217 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003218 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02003219 return false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02003220
Shaohua Liabc3c742010-03-01 09:20:54 +01003221 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003222 if (cfq_class_idle(cfqq))
3223 max_dispatch = 1;
3224
3225 /*
3226 * Does this cfqq already have too much IO in flight?
3227 */
3228 if (cfqq->dispatched >= max_dispatch) {
Shaohua Lief8a41d2011-03-07 09:26:29 +01003229 bool promote_sync = false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02003230 /*
3231 * idle queue must always only have a single IO in flight
3232 */
Jens Axboe3ed9a292007-04-23 08:33:33 +02003233 if (cfq_class_idle(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02003234 return false;
Jens Axboe3ed9a292007-04-23 08:33:33 +02003235
Jens Axboe2f5cb732009-04-07 08:51:19 +02003236 /*
Li, Shaohuac4ade942011-03-23 08:30:34 +01003237 * If there is only one sync queue
3238 * we can ignore async queue here and give the sync
Shaohua Lief8a41d2011-03-07 09:26:29 +01003239 * queue no dispatch limit. The reason is a sync queue can
3240 * preempt async queue, limiting the sync queue doesn't make
3241 * sense. This is useful for aiostress test.
3242 */
Li, Shaohuac4ade942011-03-23 08:30:34 +01003243 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3244 promote_sync = true;
Shaohua Lief8a41d2011-03-07 09:26:29 +01003245
3246 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02003247 * We have other queues, don't allow more IO from this one
3248 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01003249 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3250 !promote_sync)
Jens Axboe0b182d62009-10-06 20:49:37 +02003251 return false;
Jens Axboe9ede2092007-01-19 12:11:44 +11003252
Jens Axboe2f5cb732009-04-07 08:51:19 +02003253 /*
Shaohua Li474b18c2009-12-03 12:58:05 +01003254 * Sole queue user, no limit
Vivek Goyal365722b2009-10-03 15:21:27 +02003255 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01003256 if (cfqd->busy_queues == 1 || promote_sync)
Shaohua Liabc3c742010-03-01 09:20:54 +01003257 max_dispatch = -1;
3258 else
3259 /*
3260 * Normally we start throttling cfqq when cfq_quantum/2
3261 * requests have been dispatched. But we can drive
3262 * deeper queue depths at the beginning of slice
3263 * subjected to upper limit of cfq_quantum.
3264 * */
3265 max_dispatch = cfqd->cfq_quantum;
Jens Axboe8e296752009-10-03 16:26:03 +02003266 }
3267
3268 /*
3269 * Async queues must wait a bit before being allowed dispatch.
3270 * We also ramp up the dispatch depth gradually for async IO,
3271 * based on the last sync IO we serviced
3272 */
Jens Axboe963b72f2009-10-03 19:42:18 +02003273 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
Corrado Zoccolo573412b2009-12-06 11:48:52 +01003274 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
Jens Axboe8e296752009-10-03 16:26:03 +02003275 unsigned int depth;
Vivek Goyal365722b2009-10-03 15:21:27 +02003276
Jens Axboe61f0c1d2009-10-03 19:46:03 +02003277 depth = last_sync / cfqd->cfq_slice[1];
Jens Axboee00c54c2009-10-04 20:36:19 +02003278 if (!depth && !cfqq->dispatched)
3279 depth = 1;
Jens Axboe8e296752009-10-03 16:26:03 +02003280 if (depth < max_dispatch)
3281 max_dispatch = depth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 }
3283
Jens Axboe0b182d62009-10-06 20:49:37 +02003284 /*
3285 * If we're below the current max, allow a dispatch
3286 */
3287 return cfqq->dispatched < max_dispatch;
3288}
3289
3290/*
3291 * Dispatch a request from cfqq, moving them to the request queue
3292 * dispatch list.
3293 */
3294static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3295{
3296 struct request *rq;
3297
3298 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3299
3300 if (!cfq_may_dispatch(cfqd, cfqq))
3301 return false;
3302
3303 /*
3304 * follow expired path, else get first next available
3305 */
3306 rq = cfq_check_fifo(cfqq);
3307 if (!rq)
3308 rq = cfqq->next_rq;
3309
3310 /*
3311 * insert request into driver dispatch list
3312 */
3313 cfq_dispatch_insert(cfqd->queue, rq);
3314
3315 if (!cfqd->active_cic) {
Tejun Heoc5869802011-12-14 00:33:41 +01003316 struct cfq_io_cq *cic = RQ_CIC(rq);
Jens Axboe0b182d62009-10-06 20:49:37 +02003317
Tejun Heoc5869802011-12-14 00:33:41 +01003318 atomic_long_inc(&cic->icq.ioc->refcount);
Jens Axboe0b182d62009-10-06 20:49:37 +02003319 cfqd->active_cic = cic;
3320 }
3321
3322 return true;
3323}
3324
3325/*
3326 * Find the cfqq that we need to service and move a request from that to the
3327 * dispatch list
3328 */
3329static int cfq_dispatch_requests(struct request_queue *q, int force)
3330{
3331 struct cfq_data *cfqd = q->elevator->elevator_data;
3332 struct cfq_queue *cfqq;
3333
3334 if (!cfqd->busy_queues)
3335 return 0;
3336
3337 if (unlikely(force))
3338 return cfq_forced_dispatch(cfqd);
3339
3340 cfqq = cfq_select_queue(cfqd);
3341 if (!cfqq)
Jens Axboe8e296752009-10-03 16:26:03 +02003342 return 0;
3343
Jens Axboe2f5cb732009-04-07 08:51:19 +02003344 /*
Jens Axboe0b182d62009-10-06 20:49:37 +02003345 * Dispatch a request from this cfqq, if it is allowed
Jens Axboe2f5cb732009-04-07 08:51:19 +02003346 */
Jens Axboe0b182d62009-10-06 20:49:37 +02003347 if (!cfq_dispatch_request(cfqd, cfqq))
3348 return 0;
3349
Jens Axboe2f5cb732009-04-07 08:51:19 +02003350 cfqq->slice_dispatch++;
Jens Axboeb0291952009-04-07 11:38:31 +02003351 cfq_clear_cfqq_must_dispatch(cfqq);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003352
3353 /*
3354 * expire an async queue immediately if it has used up its slice. idle
3355 * queue always expire after 1 dispatch round.
3356 */
3357 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3358 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3359 cfq_class_idle(cfqq))) {
3360 cfqq->slice_end = jiffies + 1;
Vivek Goyale5ff0822010-04-26 19:25:11 +02003361 cfq_slice_expired(cfqd, 0);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003362 }
3363
Shan Weib217a902009-09-01 10:06:42 +02003364 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
Jens Axboe2f5cb732009-04-07 08:51:19 +02003365 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366}
3367
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368/*
Jens Axboe5e705372006-07-13 12:39:25 +02003369 * task holds one reference to the queue, dropped when task exits. each rq
3370 * in-flight on this queue also holds a reference, dropped when rq is freed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 *
Vivek Goyalb1c35762009-12-03 12:59:47 -05003372 * Each cfq queue took a reference on the parent group. Drop it now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 * queue lock must be held here.
3374 */
3375static void cfq_put_queue(struct cfq_queue *cfqq)
3376{
Jens Axboe22e2c502005-06-27 10:55:12 +02003377 struct cfq_data *cfqd = cfqq->cfqd;
Justin TerAvest0bbfeb82011-03-01 15:05:08 -05003378 struct cfq_group *cfqg;
Jens Axboe22e2c502005-06-27 10:55:12 +02003379
Shaohua Li30d7b942011-01-07 08:46:59 +01003380 BUG_ON(cfqq->ref <= 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381
Shaohua Li30d7b942011-01-07 08:46:59 +01003382 cfqq->ref--;
3383 if (cfqq->ref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 return;
3385
Jens Axboe7b679132008-05-30 12:23:07 +02003386 cfq_log_cfqq(cfqd, cfqq, "put_queue");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 BUG_ON(rb_first(&cfqq->sort_list));
Jens Axboe22e2c502005-06-27 10:55:12 +02003388 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
Vivek Goyalb1c35762009-12-03 12:59:47 -05003389 cfqg = cfqq->cfqg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
Jens Axboe28f95cbc2007-01-19 12:09:53 +11003391 if (unlikely(cfqd->active_queue == cfqq)) {
Vivek Goyale5ff0822010-04-26 19:25:11 +02003392 __cfq_slice_expired(cfqd, cfqq, 0);
Jens Axboe23e018a2009-10-05 08:52:35 +02003393 cfq_schedule_dispatch(cfqd);
Jens Axboe28f95cbc2007-01-19 12:09:53 +11003394 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003395
Vivek Goyalf04a6422009-12-03 12:59:40 -05003396 BUG_ON(cfq_cfqq_on_rr(cfqq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 kmem_cache_free(cfq_pool, cfqq);
Tejun Heoeb7d8c072012-03-23 14:02:53 +01003398 cfqg_put(cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399}
3400
Shaohua Lid02a2c02010-05-25 10:16:53 +02003401static void cfq_put_cooperator(struct cfq_queue *cfqq)
Jens Axboe89850f72006-07-22 16:48:31 +02003402{
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003403 struct cfq_queue *__cfqq, *next;
3404
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003405 /*
3406 * If this queue was scheduled to merge with another queue, be
3407 * sure to drop the reference taken on that queue (and others in
3408 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3409 */
3410 __cfqq = cfqq->new_cfqq;
3411 while (__cfqq) {
3412 if (__cfqq == cfqq) {
3413 WARN(1, "cfqq->new_cfqq loop detected\n");
3414 break;
3415 }
3416 next = __cfqq->new_cfqq;
3417 cfq_put_queue(__cfqq);
3418 __cfqq = next;
3419 }
Shaohua Lid02a2c02010-05-25 10:16:53 +02003420}
3421
3422static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3423{
3424 if (unlikely(cfqq == cfqd->active_queue)) {
3425 __cfq_slice_expired(cfqd, cfqq, 0);
3426 cfq_schedule_dispatch(cfqd);
3427 }
3428
3429 cfq_put_cooperator(cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003430
Jens Axboe89850f72006-07-22 16:48:31 +02003431 cfq_put_queue(cfqq);
3432}
3433
Tejun Heo9b84cac2011-12-14 00:33:42 +01003434static void cfq_init_icq(struct io_cq *icq)
3435{
3436 struct cfq_io_cq *cic = icq_to_cic(icq);
3437
3438 cic->ttime.last_end_request = jiffies;
3439}
3440
Tejun Heoc5869802011-12-14 00:33:41 +01003441static void cfq_exit_icq(struct io_cq *icq)
Jens Axboe89850f72006-07-22 16:48:31 +02003442{
Tejun Heoc5869802011-12-14 00:33:41 +01003443 struct cfq_io_cq *cic = icq_to_cic(icq);
Tejun Heo283287a2011-12-14 00:33:38 +01003444 struct cfq_data *cfqd = cic_to_cfqd(cic);
Fabio Checconi4faa3c82008-04-10 08:28:01 +02003445
Jens Axboeff6657c2009-04-08 10:58:57 +02003446 if (cic->cfqq[BLK_RW_ASYNC]) {
3447 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3448 cic->cfqq[BLK_RW_ASYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02003449 }
3450
Jens Axboeff6657c2009-04-08 10:58:57 +02003451 if (cic->cfqq[BLK_RW_SYNC]) {
3452 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3453 cic->cfqq[BLK_RW_SYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02003454 }
Jens Axboe89850f72006-07-22 16:48:31 +02003455}
3456
Tejun Heoabede6d2012-03-19 15:10:57 -07003457static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02003458{
3459 struct task_struct *tsk = current;
3460 int ioprio_class;
3461
Jens Axboe3b181522005-06-27 10:56:24 +02003462 if (!cfq_cfqq_prio_changed(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02003463 return;
3464
Tejun Heo598971b2012-03-19 15:10:58 -07003465 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02003466 switch (ioprio_class) {
Jens Axboefe094d92008-01-31 13:08:54 +01003467 default:
3468 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3469 case IOPRIO_CLASS_NONE:
3470 /*
Jens Axboe6d63c272008-05-07 09:51:23 +02003471 * no prio set, inherit CPU scheduling settings
Jens Axboefe094d92008-01-31 13:08:54 +01003472 */
3473 cfqq->ioprio = task_nice_ioprio(tsk);
Jens Axboe6d63c272008-05-07 09:51:23 +02003474 cfqq->ioprio_class = task_nice_ioclass(tsk);
Jens Axboefe094d92008-01-31 13:08:54 +01003475 break;
3476 case IOPRIO_CLASS_RT:
Tejun Heo598971b2012-03-19 15:10:58 -07003477 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003478 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3479 break;
3480 case IOPRIO_CLASS_BE:
Tejun Heo598971b2012-03-19 15:10:58 -07003481 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003482 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3483 break;
3484 case IOPRIO_CLASS_IDLE:
3485 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3486 cfqq->ioprio = 7;
3487 cfq_clear_cfqq_idle_window(cfqq);
3488 break;
Jens Axboe22e2c502005-06-27 10:55:12 +02003489 }
3490
3491 /*
3492 * keep track of original prio settings in case we have to temporarily
3493 * elevate the priority of this queue
3494 */
3495 cfqq->org_ioprio = cfqq->ioprio;
Jens Axboe3b181522005-06-27 10:56:24 +02003496 cfq_clear_cfqq_prio_changed(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003497}
3498
Tejun Heo598971b2012-03-19 15:10:58 -07003499static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
Jens Axboe22e2c502005-06-27 10:55:12 +02003500{
Tejun Heo598971b2012-03-19 15:10:58 -07003501 int ioprio = cic->icq.ioc->ioprio;
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003502 struct cfq_data *cfqd = cic_to_cfqd(cic);
Al Viro478a82b2006-03-18 13:25:24 -05003503 struct cfq_queue *cfqq;
Jens Axboe35e60772006-06-14 09:10:45 +02003504
Tejun Heo598971b2012-03-19 15:10:58 -07003505 /*
3506 * Check whether ioprio has changed. The condition may trigger
3507 * spuriously on a newly created cic but there's no harm.
3508 */
3509 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
Jens Axboecaaa5f92006-06-16 11:23:00 +02003510 return;
3511
Jens Axboeff6657c2009-04-08 10:58:57 +02003512 cfqq = cic->cfqq[BLK_RW_ASYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02003513 if (cfqq) {
3514 struct cfq_queue *new_cfqq;
Tejun Heoabede6d2012-03-19 15:10:57 -07003515 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3516 GFP_ATOMIC);
Jens Axboecaaa5f92006-06-16 11:23:00 +02003517 if (new_cfqq) {
Jens Axboeff6657c2009-04-08 10:58:57 +02003518 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
Jens Axboecaaa5f92006-06-16 11:23:00 +02003519 cfq_put_queue(cfqq);
3520 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003521 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02003522
Jens Axboeff6657c2009-04-08 10:58:57 +02003523 cfqq = cic->cfqq[BLK_RW_SYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02003524 if (cfqq)
3525 cfq_mark_cfqq_prio_changed(cfqq);
Tejun Heo598971b2012-03-19 15:10:58 -07003526
3527 cic->ioprio = ioprio;
Jens Axboe22e2c502005-06-27 10:55:12 +02003528}
3529
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *sync_cfqq;
	uint64_t id;

	rcu_read_lock();
	id = bio_blkcg(bio)->id;
	rcu_read_unlock();

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
		return;

	sync_cfqq = cic_to_cfqq(cic, 1);
	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}

	cic->blkcg_id = id;
}
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */

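/*
 * Look up the cfqq for this (context, sync) pair, allocating a new one if
 * needed. When __GFP_WAIT is allowed the queue lock is dropped around the
 * allocation, which is why the lookup is retried afterwards; if allocation
 * fails we fall back to the embedded oom_cfqq.
 */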
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
		     struct bio *bio, gfp_t gfp_mask)
{
	struct blkcg *blkcg;
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_group *cfqg;

retry:
	rcu_read_lock();

	blkcg = bio_blkcg(bio);
	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			rcu_read_unlock();
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
							 gfp_mask | __GFP_ZERO,
							 cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
			else
				return &cfqd->oom_cfqq;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
						     gfp_mask | __GFP_ZERO,
						     cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, cic);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	rcu_read_unlock();
	return cfqq;
}

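/*
 * Async queues are shared across processes: there is one slot per
 * (ioprio class, priority level), plus a single queue for all idle-class
 * async IO. This returns a pointer to the slot, not the queue itself.
 */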
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_NONE:
		ioprio = IOPRIO_NORM;
		/* fall through */
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio, gfp_t gfp_mask)
{
	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

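/*
 * Think time is tracked as a fixed-point exponential moving average:
 * samples and total decay by 7/8 on every update while the new sample is
 * added with weight 256, so ttime_mean = ttime_total / ttime_samples stays
 * in jiffies. For example, a steady stream of 8-jiffy gaps converges to
 * ttime_samples = 256, ttime_total = 2048, ttime_mean = 8.
 */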
static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

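/*
 * seek_history is a 32-bit shift register of per-request "seeky" verdicts:
 * on rotational storage a request counts as seeky when it lands more than
 * CFQQ_SEEK_THR sectors from the previous one, on non-rotational storage
 * when it is smaller than CFQQ_SECT_THR_NONROT. CFQQ_SEEKY() then fires
 * once more than 32/8 of the last 32 requests were seeky.
 */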
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return false
 * for no or if we aren't sure; true will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * If the workload type changed, don't save the slice; otherwise
	 * the preempt would not take effect.
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_wl_slice = 0;

	/*
	 * Put the new queue at the front of the current list, so we know
	 * that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfqg_stats_update_idle_time(cfqq->cfqg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher
		 * priority, or if this new queue is RT and the current one
		 * is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

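/*
 * Wait-busy: when the only queue in a group exhausts its slice just before
 * its next request would arrive, idle briefly instead of expiring it, so
 * that a group with a single queue is less likely to lose its share of
 * disk time on every slice boundary.
 */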
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If the think time is less than a jiffy, then ttime_mean = 0 and
	 * the above will not be true. It might happen that the slice has
	 * not expired yet but will expire soon (4-5 ns) during
	 * select_queue(). To cover the case where the think time is less
	 * than a jiffy, mark the queue wait busy if only 1 jiffy is left
	 * in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

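/*
 * Completion path: roll back per-request accounting, refresh think-time
 * state, and, if this was the active queue, either start a new slice,
 * extend it for wait-busy, expire it, or arm the idle timer.
 */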
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *st;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			st = cfqq->service_tree;
		else
			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
				    cfqq_type(cfqq));

		st->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

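/*
 * Fold cfqq into the queue it was scheduled to merge with: move the cic's
 * sync slot over to new_cfqq, drop our reference on the old queue and
 * return the surviving queue.
 */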
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * The 'deep' queue-depth flag is cleared only when idling
		 * didn't succeed.
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct cfq_data *cfqd;
	struct blkcg_gq *blkg __maybe_unused;
	int i, ret;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
	if (!cfqd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = cfqd;

	cfqd->queue = q;
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
	if (ret)
		goto out_free;

	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (!cfqd->root_group)
		goto out_free;

	cfq_init_cfqg_base(cfqd->root_group);
#endif
	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;

out_free:
	kfree(cfqd);
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

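/*
 * Each tunable gets a generated show/store pair; __CONV selects whether the
 * stored value is kept in jiffies internally and converted to/from
 * milliseconds at the sysfs boundary.
 */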
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_merge_fn = cfq_allow_merge,
		.elevator_bio_merged_fn = cfq_bio_merged,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_init_icq_fn = cfq_init_icq,
		.elevator_exit_icq_fn = cfq_exit_icq,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
	},
	.icq_size = sizeof(struct cfq_io_cq),
	.icq_align = __alignof__(struct cfq_io_cq),
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.pd_size = sizeof(struct cfq_group),
	.cftypes = cfq_blkcg_files,

	.pd_init_fn = cfq_pd_init,
	.pd_offline_fn = cfq_pd_offline,
	.pd_reset_stats_fn = cfq_pd_reset_stats,
};
#endif

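/*
 * Module init: register the blkcg policy first (when group scheduling is
 * built in), then create the cfq_queue slab cache, then register the
 * elevator; errors unwind in the reverse order.
 */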
static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");