/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
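
/*
 * Note added for exposition (not in the original source): seek_history
 * is a 32-bit shift register with one bit per recent request, set when
 * that request was seeky.  hweight32() counts the set bits, so
 * CFQQ_SEEKY() marks a queue once more than 4 of its last 32 requests
 * were seeky, e.g.:
 *
 *	seek_history == 0x0000001f	5 seeky bits -> seeky
 *	seek_history == 0x0000000f	4 seeky bits -> not seeky
 */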

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this queue */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately and does not index into service_trees.
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg.  This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active.  An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to.  This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1.  The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
252 * cfqg against the sibling cfqgs. leaf_weight is the wight of
	 * this cfqg against the child cfqgs.  For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */
	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_id;	/* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
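
/*
 * Illustrative expansion (added for exposition, not in the original
 * source): each CFQ_CFQQ_FNS(name) invocation above generates three
 * helpers operating on bit CFQ_CFQQ_FLAG_<name> of cfqq->flags.  For
 * CFQ_CFQQ_FNS(on_rr) that is:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);	set the flag
 *	cfq_clear_cfqq_on_rr(cfqq);	clear the flag
 *	cfq_cfqq_on_rr(cfqq);		test the flag
 */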

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
	blkg_rwstat_merge(&to->serviced, &from->serviced);
	blkg_rwstat_merge(&to->merged, &from->merged);
	blkg_rwstat_merge(&to->service_time, &from->service_time);
	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
	blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_merge(&to->dequeue, &from->dequeue);
	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_merge(&to->idle_time, &from->idle_time);
	blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
				##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

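/*
 * Illustrative note (added for exposition, not in the original source):
 * with the enums above, for_each_cfqg_st() visits seven trees in order:
 * BE[ASYNC], BE[SYNC_NOIDLE], BE[SYNC], RT[ASYNC], RT[SYNC_NOIDLE],
 * RT[SYNC], then service_tree_idle.  A typical caller looks like:
 *
 *	struct cfq_rb_root *st;
 *	int i, j, queued = 0;
 *
 *	for_each_cfqg_st(cfqg, i, j, st)
 *		queued += st->count;
 */
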
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
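
/*
 * Worked example (added for exposition, not in the original source):
 * with HZ == 1000 the default sync base slice cfq_slice[1] is 100
 * jiffies (100ms), so:
 *
 *	prio 0 (highest): 100 + 100/5 * (4 - 0) = 180ms
 *	prio 4 (default): 100 + 100/5 * (4 - 4) = 100ms
 *	prio 7 (lowest):  100 + 100/5 * (4 - 7) =  40ms
 */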

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 * scaling is inversely proportional.
 *
 *   scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	do_div(c, vfraction);
	return c;
}
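
/*
 * Worked example (added for exposition, not in the original source):
 * the above computes (charge << (2 * CFQ_SERVICE_SHIFT)) / vfraction.
 * A group entitled to the whole device (vfraction == 1 << 12) is
 * charged at face value; one entitled to half the device accrues
 * vdisktime twice as fast:
 *
 *	cfqg_scale_charge(8, 1 << 12) == 8 << 12
 *	cfqg_scale_charge(8, 1 << 11) == 16 << 12
 */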

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follow sudden increases and decay slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
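
/*
 * Worked example (added for exposition, not in the original source):
 * with cfq_hist_divisor == 4 the update is (3 * max_q + min_q + 2) / 4,
 * so the average tracks rises quickly and decays slowly.  Starting from
 * an average of 2:
 *
 *	busy jumps to 6:	(3 * 6 + 2 + 2) / 4 = 5
 *	busy then drops to 0:	(3 * 5 + 0 + 2) / 4 = 4
 */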

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
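
/*
 * Worked example (added for exposition, not in the original source):
 * with a 100ms sync slice and iq == 4 interested queues, expect_latency
 * is 400ms.  If the group's share of the target latency (group_slice)
 * is only 150ms, each slice is compressed by group_slice/expect_latency,
 * i.e. to 150/400 of its prio-based length, bounded below by low_slice.
 */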

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
1160
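/*
 * A worked example of the selection above (a sketch, assuming the
 * default cfq_back_penalty of 2): with the disk head at last = 1000
 * and back_max large enough to cover both requests,
 *
 *	s1 = 1200  =>  d1 = s1 - last       = 200    (forward seek)
 *	s2 =  900  =>  d2 = (last - s2) * 2 = 200    (short back seek)
 *
 * Neither request wraps, so the "case 0" branch sees d1 == d2 and the
 * tie-break "s1 >= s2" returns rq1: a forward seek wins whenever a
 * backward seek of half the distance would cost the same after the
 * penalty is applied.
 */
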
Jens Axboe498d3aa22007-04-26 12:54:48 +02001161/*
 1162	 * Below is the leftmost-node cache addon for the rbtree
1163 */
Jens Axboe08717142008-01-28 11:38:15 +01001164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
Jens Axboecc09e292007-04-26 12:53:50 +02001165{
Vivek Goyal615f0252009-12-03 12:59:39 -05001166 /* Service tree is empty */
1167 if (!root->count)
1168 return NULL;
1169
Jens Axboecc09e292007-04-26 12:53:50 +02001170 if (!root->left)
1171 root->left = rb_first(&root->rb);
1172
Jens Axboe08717142008-01-28 11:38:15 +01001173 if (root->left)
1174 return rb_entry(root->left, struct cfq_queue, rb_node);
1175
1176 return NULL;
Jens Axboecc09e292007-04-26 12:53:50 +02001177}
1178
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1180{
1181 if (!root->left)
1182 root->left = rb_first(&root->rb);
1183
1184 if (root->left)
1185 return rb_entry_cfqg(root->left);
1186
1187 return NULL;
1188}
1189
Jens Axboea36e71f2009-04-15 12:15:11 +02001190static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1191{
1192 rb_erase(n, root);
1193 RB_CLEAR_NODE(n);
1194}
1195
Jens Axboecc09e292007-04-26 12:53:50 +02001196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1197{
1198 if (root->left == n)
1199 root->left = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02001200 rb_erase_init(n, &root->rb);
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01001201 --root->count;
Jens Axboecc09e292007-04-26 12:53:50 +02001202}
1203
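/*
 * The three helpers above implement a leftmost-node cache: rb_first()
 * is O(log n), so its result is memoized in root->left and recomputed
 * lazily after cfq_rb_erase() invalidates it. A usage sketch:
 *
 *	struct cfq_queue *first;
 *
 *	first = cfq_rb_first(st);		(may walk to rb_first())
 *	cfq_rb_erase(&first->rb_node, st);	(clears st->left)
 *	first = cfq_rb_first(st);		(re-walks, refills cache)
 *
 * This keeps the hot "pick the next queue to service" path effectively
 * O(1) between tree modifications.
 */
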
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204/*
1205 * would be nice to take fifo expire time into account as well
1206 */
Jens Axboe5e705372006-07-13 12:39:25 +02001207static struct request *
1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1209 struct request *last)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210{
Jens Axboe21183b02006-07-13 12:33:14 +02001211 struct rb_node *rbnext = rb_next(&last->rb_node);
1212 struct rb_node *rbprev = rb_prev(&last->rb_node);
Jens Axboe5e705372006-07-13 12:39:25 +02001213 struct request *next = NULL, *prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
Jens Axboe21183b02006-07-13 12:33:14 +02001215 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
1217 if (rbprev)
Jens Axboe5e705372006-07-13 12:39:25 +02001218 prev = rb_entry_rq(rbprev);
Jens Axboe21183b02006-07-13 12:33:14 +02001219
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 if (rbnext)
Jens Axboe5e705372006-07-13 12:39:25 +02001221 next = rb_entry_rq(rbnext);
Jens Axboe21183b02006-07-13 12:33:14 +02001222 else {
1223 rbnext = rb_first(&cfqq->sort_list);
1224 if (rbnext && rbnext != &last->rb_node)
Jens Axboe5e705372006-07-13 12:39:25 +02001225 next = rb_entry_rq(rbnext);
Jens Axboe21183b02006-07-13 12:33:14 +02001226 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01001228 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229}
1230
Jens Axboed9e76202007-04-20 14:27:50 +02001231static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1232 struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
Jens Axboed9e76202007-04-20 14:27:50 +02001234 /*
1235 * just an approximation, should be ok.
1236 */
Vivek Goyalcdb16e82009-12-03 12:59:38 -05001237 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
Jens Axboe464191c2009-11-30 09:38:13 +01001238 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
Jens Axboed9e76202007-04-20 14:27:50 +02001239}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001241static inline s64
1242cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1243{
1244 return cfqg->vdisktime - st->min_vdisktime;
1245}
1246
1247static void
1248__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1249{
1250 struct rb_node **node = &st->rb.rb_node;
1251 struct rb_node *parent = NULL;
1252 struct cfq_group *__cfqg;
1253 s64 key = cfqg_key(st, cfqg);
1254 int left = 1;
1255
1256 while (*node != NULL) {
1257 parent = *node;
1258 __cfqg = rb_entry_cfqg(parent);
1259
1260 if (key < cfqg_key(st, __cfqg))
1261 node = &parent->rb_left;
1262 else {
1263 node = &parent->rb_right;
1264 left = 0;
1265 }
1266 }
1267
1268 if (left)
1269 st->left = &cfqg->rb_node;
1270
1271 rb_link_node(&cfqg->rb_node, parent, node);
1272 rb_insert_color(&cfqg->rb_node, &st->rb);
1273}
1274
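/*
 * Note on the sort key above: __cfq_group_service_tree_add() orders
 * groups by cfqg_key() = vdisktime - min_vdisktime rather than by raw
 * vdisktime. Comparing signed offsets keeps the ordering correct even
 * if the u64 vdisktime values eventually wrap, the same trick
 * time_before() uses for jiffies. A sketch:
 *
 *	min_vdisktime = ULLONG_MAX - 50
 *	vdisktime(A)  = ULLONG_MAX - 10  =>  key(A) = 40
 *	vdisktime(B)  = 30 (wrapped)     =>  key(B) = 81
 *
 * so B still sorts after A despite its numerically smaller vdisktime.
 */
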
1275static void
Justin TerAvest8184f932011-03-17 16:12:36 +01001276cfq_update_group_weight(struct cfq_group *cfqg)
1277{
1278 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
Tejun Heoe71357e2013-01-09 08:05:10 -08001279
Tejun Heo3381cb82012-04-01 14:38:44 -07001280 if (cfqg->new_weight) {
Justin TerAvest8184f932011-03-17 16:12:36 +01001281 cfqg->weight = cfqg->new_weight;
Tejun Heo3381cb82012-04-01 14:38:44 -07001282 cfqg->new_weight = 0;
Justin TerAvest8184f932011-03-17 16:12:36 +01001283 }
Tejun Heoe71357e2013-01-09 08:05:10 -08001284
1285 if (cfqg->new_leaf_weight) {
1286 cfqg->leaf_weight = cfqg->new_leaf_weight;
1287 cfqg->new_leaf_weight = 0;
1288 }
Justin TerAvest8184f932011-03-17 16:12:36 +01001289}
1290
1291static void
1292cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1293{
Tejun Heo1d3650f2013-01-09 08:05:11 -08001294 unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
Tejun Heo7918ffb2013-01-09 08:05:11 -08001295 struct cfq_group *pos = cfqg;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001296 struct cfq_group *parent;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001297 bool propagate;
1298
1299 /* add to the service tree */
Justin TerAvest8184f932011-03-17 16:12:36 +01001300 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1301
1302 cfq_update_group_weight(cfqg);
1303 __cfq_group_service_tree_add(st, cfqg);
Tejun Heo7918ffb2013-01-09 08:05:11 -08001304
1305 /*
Tejun Heo1d3650f2013-01-09 08:05:11 -08001306 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1307 * entitled to. vfraction is calculated by walking the tree
1308 * towards the root calculating the fraction it has at each level.
1309 * The compounded ratio is how much vfraction @cfqg owns.
1310 *
 1311	 * Start with the proportion the tasks in this cfqg have against its
 1312	 * active children cfqgs - its leaf_weight against children_weight.
Tejun Heo7918ffb2013-01-09 08:05:11 -08001313 */
1314 propagate = !pos->nr_active++;
1315 pos->children_weight += pos->leaf_weight;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001316 vfr = vfr * pos->leaf_weight / pos->children_weight;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001317
Tejun Heo1d3650f2013-01-09 08:05:11 -08001318 /*
1319 * Compound ->weight walking up the tree. Both activation and
1320 * vfraction calculation are done in the same loop. Propagation
1321 * stops once an already activated node is met. vfraction
1322 * calculation should always continue to the root.
1323 */
Tejun Heod02f7aa2013-01-09 08:05:11 -08001324 while ((parent = cfqg_parent(pos))) {
Tejun Heo1d3650f2013-01-09 08:05:11 -08001325 if (propagate) {
1326 propagate = !parent->nr_active++;
1327 parent->children_weight += pos->weight;
1328 }
1329 vfr = vfr * pos->weight / parent->children_weight;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001330 pos = parent;
1331 }
Tejun Heo1d3650f2013-01-09 08:05:11 -08001332
1333 cfqg->vfraction = max_t(unsigned, vfr, 1);
Justin TerAvest8184f932011-03-17 16:12:36 +01001334}
1335
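/*
 * A worked vfraction example (a sketch; CFQ_SERVICE_SHIFT is 12, so
 * vfr starts at 4096, representing 1.0 in fixed point). Activate a
 * group G whose tasks have leaf_weight = 500, where G already has one
 * active child group of weight 500 and one active sibling of weight
 * 500 under the root:
 *
 *	at G:        children_weight = 500 + 500 = 1000
 *	             vfr = 4096 * 500 / 1000     = 2048   (1/2)
 *	at the root: children_weight = 500 + 500 = 1000
 *	             vfr = 2048 * 500 / 1000     = 1024   (1/4)
 *
 * The tasks in G are entitled to 1/4 of the device service: the
 * product of their share at each level of the hierarchy.
 */
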
1336static void
1337cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001338{
1339 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1340 struct cfq_group *__cfqg;
1341 struct rb_node *n;
1342
1343 cfqg->nr_cfqq++;
Gui Jianfeng760701b2010-11-30 20:52:47 +01001344 if (!RB_EMPTY_NODE(&cfqg->rb_node))
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001345 return;
1346
1347 /*
 1348	 * Currently put the group at the end. Later implement something
 1349	 * so that groups get less vtime based on their weights, so that
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001350	 * a group does not lose all of its share if it was not continuously backlogged.
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001351 */
1352 n = rb_last(&st->rb);
1353 if (n) {
1354 __cfqg = rb_entry_cfqg(n);
1355 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1356 } else
1357 cfqg->vdisktime = st->min_vdisktime;
Justin TerAvest8184f932011-03-17 16:12:36 +01001358 cfq_group_service_tree_add(st, cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001359}
1360
1361static void
Justin TerAvest8184f932011-03-17 16:12:36 +01001362cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1363{
Tejun Heo7918ffb2013-01-09 08:05:11 -08001364 struct cfq_group *pos = cfqg;
1365 bool propagate;
1366
1367 /*
1368 * Undo activation from cfq_group_service_tree_add(). Deactivate
1369 * @cfqg and propagate deactivation upwards.
1370 */
1371 propagate = !--pos->nr_active;
1372 pos->children_weight -= pos->leaf_weight;
1373
1374 while (propagate) {
Tejun Heod02f7aa2013-01-09 08:05:11 -08001375 struct cfq_group *parent = cfqg_parent(pos);
Tejun Heo7918ffb2013-01-09 08:05:11 -08001376
1377 /* @pos has 0 nr_active at this point */
1378 WARN_ON_ONCE(pos->children_weight);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001379 pos->vfraction = 0;
Tejun Heo7918ffb2013-01-09 08:05:11 -08001380
1381 if (!parent)
1382 break;
1383
1384 propagate = !--parent->nr_active;
1385 parent->children_weight -= pos->weight;
1386 pos = parent;
1387 }
1388
1389 /* remove from the service tree */
Justin TerAvest8184f932011-03-17 16:12:36 +01001390 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1391 cfq_rb_erase(&cfqg->rb_node, st);
1392}
1393
1394static void
1395cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001396{
1397 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1398
1399 BUG_ON(cfqg->nr_cfqq < 1);
1400 cfqg->nr_cfqq--;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05001401
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001402 /* If there are other cfq queues under this group, don't delete it */
1403 if (cfqg->nr_cfqq)
1404 return;
1405
Vivek Goyal2868ef72009-12-03 12:59:48 -05001406 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
Justin TerAvest8184f932011-03-17 16:12:36 +01001407 cfq_group_service_tree_del(st, cfqg);
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001408 cfqg->saved_wl_slice = 0;
Tejun Heo155fead2012-04-01 14:38:44 -07001409 cfqg_stats_update_dequeue(cfqg);
Vivek Goyaldae739e2009-12-03 12:59:45 -05001410}
1411
Justin TerAvest167400d2011-03-12 16:54:00 +01001412static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1413 unsigned int *unaccounted_time)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001414{
Vivek Goyalf75edf22009-12-03 12:59:53 -05001415 unsigned int slice_used;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001416
1417 /*
1418 * Queue got expired before even a single request completed or
1419 * got expired immediately after first request completion.
1420 */
1421 if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1422 /*
1423 * Also charge the seek time incurred to the group, otherwise
 1424	 * if there are multiple queues in the group, each can dispatch
 1425	 * a single request on seeky media and cause lots of seek time
 1426	 * and the group will never know it.
1427 */
1428 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1429 1);
1430 } else {
1431 slice_used = jiffies - cfqq->slice_start;
Justin TerAvest167400d2011-03-12 16:54:00 +01001432 if (slice_used > cfqq->allocated_slice) {
1433 *unaccounted_time = slice_used - cfqq->allocated_slice;
Vivek Goyalf75edf22009-12-03 12:59:53 -05001434 slice_used = cfqq->allocated_slice;
Justin TerAvest167400d2011-03-12 16:54:00 +01001435 }
1436 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1437 *unaccounted_time += cfqq->slice_start -
1438 cfqq->dispatch_start;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001439 }
1440
Vivek Goyaldae739e2009-12-03 12:59:45 -05001441 return slice_used;
1442}
1443
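/*
 * Example of the accounting above (a sketch, all times in jiffies):
 * take a queue with allocated_slice = 100 that started dispatching at
 * dispatch_start = T, only received its slice at slice_start = T + 20,
 * and is expired at T + 140:
 *
 *	slice_used        = (T + 140) - (T + 20) = 120
 *	*unaccounted_time = 120 - 100            = 20   (overrun)
 *	slice_used        = 100                         (capped)
 *	*unaccounted_time += (T + 20) - T        -> 40  (pre-slice wait)
 *
 * Only the capped 100 is charged to the group; the remaining 40 is
 * reported separately as unaccounted time.
 */
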
1444static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
Vivek Goyale5ff0822010-04-26 19:25:11 +02001445 struct cfq_queue *cfqq)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001446{
1447 struct cfq_rb_root *st = &cfqd->grp_service_tree;
Justin TerAvest167400d2011-03-12 16:54:00 +01001448 unsigned int used_sl, charge, unaccounted_sl = 0;
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001449 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1450 - cfqg->service_tree_idle.count;
Tejun Heo1d3650f2013-01-09 08:05:11 -08001451 unsigned int vfr;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001452
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001453 BUG_ON(nr_sync < 0);
Justin TerAvest167400d2011-03-12 16:54:00 +01001454 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05001455
Vivek Goyal02b35082010-08-23 12:23:53 +02001456 if (iops_mode(cfqd))
1457 charge = cfqq->slice_dispatch;
1458 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1459 charge = cfqq->allocated_slice;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001460
Tejun Heo1d3650f2013-01-09 08:05:11 -08001461 /*
1462 * Can't update vdisktime while on service tree and cfqg->vfraction
1463 * is valid only while on it. Cache vfr, leave the service tree,
1464 * update vdisktime and go back on. The re-addition to the tree
1465 * will also update the weights as necessary.
1466 */
1467 vfr = cfqg->vfraction;
Justin TerAvest8184f932011-03-17 16:12:36 +01001468 cfq_group_service_tree_del(st, cfqg);
Tejun Heo1d3650f2013-01-09 08:05:11 -08001469 cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
Justin TerAvest8184f932011-03-17 16:12:36 +01001470 cfq_group_service_tree_add(st, cfqg);
Vivek Goyaldae739e2009-12-03 12:59:45 -05001471
1472 /* This group is being expired. Save the context */
1473 if (time_after(cfqd->workload_expires, jiffies)) {
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001474 cfqg->saved_wl_slice = cfqd->workload_expires
Vivek Goyaldae739e2009-12-03 12:59:45 -05001475 - jiffies;
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001476 cfqg->saved_wl_type = cfqd->serving_wl_type;
1477 cfqg->saved_wl_class = cfqd->serving_wl_class;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001478 } else
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04001479 cfqg->saved_wl_slice = 0;
Vivek Goyal2868ef72009-12-03 12:59:48 -05001480
1481 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1482 st->min_vdisktime);
Joe Perchesfd16d262011-06-13 10:42:49 +02001483 cfq_log_cfqq(cfqq->cfqd, cfqq,
1484 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1485 used_sl, cfqq->slice_dispatch, charge,
1486 iops_mode(cfqd), cfqq->nr_sectors);
Tejun Heo155fead2012-04-01 14:38:44 -07001487 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1488 cfqg_stats_set_start_empty_time(cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001489}
1490
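/*
 * Example of the vdisktime charge above (a sketch, assuming that
 * cfqg_scale_charge() - defined earlier in this file - divides the
 * charge by vfraction in CFQ_SERVICE_SHIFT fixed point, as its use
 * here implies): a group with vfr = 1024 (a 1/4 share) that consumed
 * charge = 100 jiffies of slice is charged
 *
 *	vdisktime += (100 << 12) / 1024 = 400
 *
 * i.e. four times the wall-clock service. Smaller shares advance
 * faster through the service tree and are therefore scheduled
 * proportionally less often.
 */
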
Tejun Heof51b8022012-03-05 13:15:05 -08001491/**
1492 * cfq_init_cfqg_base - initialize base part of a cfq_group
1493 * @cfqg: cfq_group to initialize
1494 *
1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1496 * is enabled or not.
1497 */
1498static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1499{
1500 struct cfq_rb_root *st;
1501 int i, j;
1502
1503 for_each_cfqg_st(cfqg, i, j, st)
1504 *st = CFQ_RB_ROOT;
1505 RB_CLEAR_NODE(&cfqg->rb_node);
1506
1507 cfqg->ttime.last_end_request = jiffies;
1508}
1509
Vivek Goyal25fb5162009-12-03 12:59:46 -05001510#ifdef CONFIG_CFQ_GROUP_IOSCHED
Tejun Heo3c798392012-04-16 13:57:25 -07001511static void cfq_pd_init(struct blkcg_gq *blkg)
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001512{
Tejun Heo03814112012-03-05 13:15:14 -08001513 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
Vivek Goyal25fb5162009-12-03 12:59:46 -05001514
Tejun Heof51b8022012-03-05 13:15:05 -08001515 cfq_init_cfqg_base(cfqg);
Tejun Heo3381cb82012-04-01 14:38:44 -07001516 cfqg->weight = blkg->blkcg->cfq_weight;
Tejun Heoe71357e2013-01-09 08:05:10 -08001517 cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001518}
1519
Tejun Heo0b399202013-01-09 08:05:13 -08001520static void cfq_pd_offline(struct blkcg_gq *blkg)
1521{
1522 /*
1523 * @blkg is going offline and will be ignored by
1524 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
1525 * that they don't get lost. If IOs complete after this point, the
1526 * stats for them will be lost. Oh well...
1527 */
1528 cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1529}
1530
Tejun Heo689665a2013-01-09 08:05:13 -08001531static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1532{
1533 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1534
1535 cfqg_stats_reset(&cfqg->stats);
Tejun Heo0b399202013-01-09 08:05:13 -08001536 cfqg_stats_reset(&cfqg->dead_stats);
Tejun Heo689665a2013-01-09 08:05:13 -08001537}
1538
Vivek Goyal25fb5162009-12-03 12:59:46 -05001539/*
Vivek Goyal3e59cf92011-05-19 15:38:21 -04001540 * Search for the cfq group the current task belongs to. request_queue lock must
1541 * be held.
Vivek Goyal25fb5162009-12-03 12:59:46 -05001542 */
Tejun Heocd1604f2012-03-05 13:15:06 -08001543static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
Tejun Heo3c798392012-04-16 13:57:25 -07001544 struct blkcg *blkcg)
Vivek Goyal25fb5162009-12-03 12:59:46 -05001545{
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001546 struct request_queue *q = cfqd->queue;
Tejun Heocd1604f2012-03-05 13:15:06 -08001547 struct cfq_group *cfqg = NULL;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001548
Tejun Heo3c798392012-04-16 13:57:25 -07001549 /* avoid lookup for the common case where there's no blkcg */
1550 if (blkcg == &blkcg_root) {
Tejun Heocd1604f2012-03-05 13:15:06 -08001551 cfqg = cfqd->root_group;
1552 } else {
Tejun Heo3c798392012-04-16 13:57:25 -07001553 struct blkcg_gq *blkg;
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001554
Tejun Heo3c96cb32012-04-13 13:11:34 -07001555 blkg = blkg_lookup_create(blkcg, q);
Tejun Heocd1604f2012-03-05 13:15:06 -08001556 if (!IS_ERR(blkg))
Tejun Heo03814112012-03-05 13:15:14 -08001557 cfqg = blkg_to_cfqg(blkg);
Vivek Goyalf469a7b2011-05-19 15:38:23 -04001558 }
1559
Vivek Goyal25fb5162009-12-03 12:59:46 -05001560 return cfqg;
1561}
1562
1563static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1564{
1565 /* Currently, all async queues are mapped to root group */
1566 if (!cfq_cfqq_sync(cfqq))
Tejun Heof51b8022012-03-05 13:15:05 -08001567 cfqg = cfqq->cfqd->root_group;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001568
1569 cfqq->cfqg = cfqg;
Vivek Goyalb1c35762009-12-03 12:59:47 -05001570 /* cfqq reference on cfqg */
Tejun Heoeb7d8c072012-03-23 14:02:53 +01001571 cfqg_get(cfqg);
Vivek Goyalb1c35762009-12-03 12:59:47 -05001572}
1573
Tejun Heof95a04a2012-04-16 13:57:26 -07001574static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1575 struct blkg_policy_data *pd, int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001576{
Tejun Heof95a04a2012-04-16 13:57:26 -07001577 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo3381cb82012-04-01 14:38:44 -07001578
1579 if (!cfqg->dev_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001580 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001581 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001582}
1583
Tejun Heo3381cb82012-04-01 14:38:44 -07001584static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
1585 struct seq_file *sf)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001586{
Tejun Heo3c798392012-04-16 13:57:25 -07001587 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1588 cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001589 false);
1590 return 0;
1591}
1592
Tejun Heoe71357e2013-01-09 08:05:10 -08001593static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1594 struct blkg_policy_data *pd, int off)
1595{
1596 struct cfq_group *cfqg = pd_to_cfqg(pd);
1597
1598 if (!cfqg->dev_leaf_weight)
1599 return 0;
1600 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1601}
1602
1603static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
1604 struct cftype *cft,
1605 struct seq_file *sf)
1606{
1607 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
1608 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
1609 false);
1610 return 0;
1611}
1612
Tejun Heo3381cb82012-04-01 14:38:44 -07001613static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
1614 struct seq_file *sf)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001615{
Tejun Heo3c798392012-04-16 13:57:25 -07001616 seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001617 return 0;
1618}
1619
Tejun Heoe71357e2013-01-09 08:05:10 -08001620static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
1621 struct seq_file *sf)
1622{
1623 seq_printf(sf, "%u\n",
1624 cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
1625 return 0;
1626}
1627
1628static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1629 const char *buf, bool is_leaf_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001630{
Tejun Heo3c798392012-04-16 13:57:25 -07001631 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001632 struct blkg_conf_ctx ctx;
Tejun Heo3381cb82012-04-01 14:38:44 -07001633 struct cfq_group *cfqg;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001634 int ret;
1635
Tejun Heo3c798392012-04-16 13:57:25 -07001636 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001637 if (ret)
1638 return ret;
1639
1640 ret = -EINVAL;
Tejun Heo3381cb82012-04-01 14:38:44 -07001641 cfqg = blkg_to_cfqg(ctx.blkg);
Tejun Heoa2b16932012-04-13 13:11:33 -07001642 if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
Tejun Heoe71357e2013-01-09 08:05:10 -08001643 if (!is_leaf_weight) {
1644 cfqg->dev_weight = ctx.v;
1645 cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1646 } else {
1647 cfqg->dev_leaf_weight = ctx.v;
1648 cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1649 }
Tejun Heo60c2bc22012-04-01 14:38:43 -07001650 ret = 0;
1651 }
1652
1653 blkg_conf_finish(&ctx);
1654 return ret;
1655}
1656
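/*
 * Userspace usage sketch for the handler above (device numbers and
 * mount point are illustrative): a per-device weight is written as a
 * "MAJ:MIN weight" pair, and writing 0 reverts the device to the
 * cgroup-wide default via the "ctx.v ?: blkcg->cfq_weight" fallback:
 *
 *	echo "8:16 300" > /sys/fs/cgroup/blkio/grp1/blkio.weight_device
 *	echo "8:16 0"   > /sys/fs/cgroup/blkio/grp1/blkio.weight_device
 *
 * blkg_conf_prep() parses the pair and resolves the blkg; non-zero
 * values outside [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX] are rejected with
 * -EINVAL.
 */
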
Tejun Heoe71357e2013-01-09 08:05:10 -08001657static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
1658 const char *buf)
1659{
1660 return __cfqg_set_weight_device(cgrp, cft, buf, false);
1661}
1662
1663static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
1664 const char *buf)
1665{
1666 return __cfqg_set_weight_device(cgrp, cft, buf, true);
1667}
1668
1669static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
1670 bool is_leaf_weight)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001671{
Tejun Heo3c798392012-04-16 13:57:25 -07001672 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1673 struct blkcg_gq *blkg;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001674 struct hlist_node *n;
1675
Tejun Heo3381cb82012-04-01 14:38:44 -07001676 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001677 return -EINVAL;
1678
1679 spin_lock_irq(&blkcg->lock);
Tejun Heoe71357e2013-01-09 08:05:10 -08001680
1681 if (!is_leaf_weight)
1682 blkcg->cfq_weight = val;
1683 else
1684 blkcg->cfq_leaf_weight = val;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001685
1686 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
Tejun Heo3381cb82012-04-01 14:38:44 -07001687 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001688
Tejun Heoe71357e2013-01-09 08:05:10 -08001689 if (!cfqg)
1690 continue;
1691
1692 if (!is_leaf_weight) {
1693 if (!cfqg->dev_weight)
1694 cfqg->new_weight = blkcg->cfq_weight;
1695 } else {
1696 if (!cfqg->dev_leaf_weight)
1697 cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1698 }
Tejun Heo60c2bc22012-04-01 14:38:43 -07001699 }
1700
1701 spin_unlock_irq(&blkcg->lock);
1702 return 0;
1703}
1704
Tejun Heoe71357e2013-01-09 08:05:10 -08001705static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1706{
1707 return __cfq_set_weight(cgrp, cft, val, false);
1708}
1709
1710static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
1711{
1712 return __cfq_set_weight(cgrp, cft, val, true);
1713}
1714
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001715static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
1716 struct seq_file *sf)
1717{
Tejun Heo3c798392012-04-16 13:57:25 -07001718 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001719
Tejun Heo3c798392012-04-16 13:57:25 -07001720 blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001721 cft->private, false);
1722 return 0;
1723}
1724
1725static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
1726 struct seq_file *sf)
1727{
Tejun Heo3c798392012-04-16 13:57:25 -07001728 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001729
Tejun Heo3c798392012-04-16 13:57:25 -07001730 blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001731 cft->private, true);
1732 return 0;
1733}
1734
Tejun Heo60c2bc22012-04-01 14:38:43 -07001735#ifdef CONFIG_DEBUG_BLK_CGROUP
Tejun Heof95a04a2012-04-16 13:57:26 -07001736static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1737 struct blkg_policy_data *pd, int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001738{
Tejun Heof95a04a2012-04-16 13:57:26 -07001739 struct cfq_group *cfqg = pd_to_cfqg(pd);
Tejun Heo155fead2012-04-01 14:38:44 -07001740 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001741 u64 v = 0;
1742
1743 if (samples) {
Tejun Heo155fead2012-04-01 14:38:44 -07001744 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001745 do_div(v, samples);
1746 }
Tejun Heof95a04a2012-04-16 13:57:26 -07001747 __blkg_prfill_u64(sf, pd, v);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001748 return 0;
1749}
1750
1751/* print avg_queue_size */
Tejun Heo155fead2012-04-01 14:38:44 -07001752static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
1753 struct seq_file *sf)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001754{
Tejun Heo3c798392012-04-16 13:57:25 -07001755 struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001756
Tejun Heo155fead2012-04-01 14:38:44 -07001757 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
Tejun Heo3c798392012-04-16 13:57:25 -07001758 &blkcg_policy_cfq, 0, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001759 return 0;
1760}
1761#endif /* CONFIG_DEBUG_BLK_CGROUP */
1762
1763static struct cftype cfq_blkcg_files[] = {
Tejun Heo1d3650f2013-01-09 08:05:11 -08001764 /* on root, weight is mapped to leaf_weight */
Tejun Heo60c2bc22012-04-01 14:38:43 -07001765 {
1766 .name = "weight_device",
Tejun Heo1d3650f2013-01-09 08:05:11 -08001767 .flags = CFTYPE_ONLY_ON_ROOT,
1768 .read_seq_string = cfqg_print_leaf_weight_device,
1769 .write_string = cfqg_set_leaf_weight_device,
1770 .max_write_len = 256,
1771 },
1772 {
1773 .name = "weight",
1774 .flags = CFTYPE_ONLY_ON_ROOT,
1775 .read_seq_string = cfq_print_leaf_weight,
1776 .write_u64 = cfq_set_leaf_weight,
1777 },
1778
1779 /* no such mapping necessary for !roots */
1780 {
1781 .name = "weight_device",
1782 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo3381cb82012-04-01 14:38:44 -07001783 .read_seq_string = cfqg_print_weight_device,
1784 .write_string = cfqg_set_weight_device,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001785 .max_write_len = 256,
1786 },
1787 {
1788 .name = "weight",
Tejun Heoe71357e2013-01-09 08:05:10 -08001789 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo1d3650f2013-01-09 08:05:11 -08001790 .read_seq_string = cfq_print_weight,
1791 .write_u64 = cfq_set_weight,
1792 },
1793
1794 {
1795 .name = "leaf_weight_device",
Tejun Heoe71357e2013-01-09 08:05:10 -08001796 .read_seq_string = cfqg_print_leaf_weight_device,
1797 .write_string = cfqg_set_leaf_weight_device,
1798 .max_write_len = 256,
1799 },
1800 {
1801 .name = "leaf_weight",
Tejun Heoe71357e2013-01-09 08:05:10 -08001802 .read_seq_string = cfq_print_leaf_weight,
1803 .write_u64 = cfq_set_leaf_weight,
1804 },
1805
Tejun Heo60c2bc22012-04-01 14:38:43 -07001806 {
1807 .name = "time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001808 .private = offsetof(struct cfq_group, stats.time),
1809 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001810 },
1811 {
1812 .name = "sectors",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001813 .private = offsetof(struct cfq_group, stats.sectors),
1814 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001815 },
1816 {
1817 .name = "io_service_bytes",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001818 .private = offsetof(struct cfq_group, stats.service_bytes),
1819 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001820 },
1821 {
1822 .name = "io_serviced",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001823 .private = offsetof(struct cfq_group, stats.serviced),
1824 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001825 },
1826 {
1827 .name = "io_service_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001828 .private = offsetof(struct cfq_group, stats.service_time),
1829 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001830 },
1831 {
1832 .name = "io_wait_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001833 .private = offsetof(struct cfq_group, stats.wait_time),
1834 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001835 },
1836 {
1837 .name = "io_merged",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001838 .private = offsetof(struct cfq_group, stats.merged),
1839 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001840 },
1841 {
1842 .name = "io_queued",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001843 .private = offsetof(struct cfq_group, stats.queued),
1844 .read_seq_string = cfqg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001845 },
1846#ifdef CONFIG_DEBUG_BLK_CGROUP
1847 {
1848 .name = "avg_queue_size",
Tejun Heo155fead2012-04-01 14:38:44 -07001849 .read_seq_string = cfqg_print_avg_queue_size,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001850 },
1851 {
1852 .name = "group_wait_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001853 .private = offsetof(struct cfq_group, stats.group_wait_time),
1854 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001855 },
1856 {
1857 .name = "idle_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001858 .private = offsetof(struct cfq_group, stats.idle_time),
1859 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001860 },
1861 {
1862 .name = "empty_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001863 .private = offsetof(struct cfq_group, stats.empty_time),
1864 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001865 },
1866 {
1867 .name = "dequeue",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001868 .private = offsetof(struct cfq_group, stats.dequeue),
1869 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001870 },
1871 {
1872 .name = "unaccounted_time",
Tejun Heo5bc4afb12012-04-01 14:38:45 -07001873 .private = offsetof(struct cfq_group, stats.unaccounted_time),
1874 .read_seq_string = cfqg_print_stat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001875 },
1876#endif /* CONFIG_DEBUG_BLK_CGROUP */
1877 { } /* terminate */
1878};
Vivek Goyal25fb5162009-12-03 12:59:46 -05001879#else /* GROUP_IOSCHED */
Tejun Heocd1604f2012-03-05 13:15:06 -08001880static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
Tejun Heo3c798392012-04-16 13:57:25 -07001881 struct blkcg *blkcg)
Vivek Goyal25fb5162009-12-03 12:59:46 -05001882{
Tejun Heof51b8022012-03-05 13:15:05 -08001883 return cfqd->root_group;
Vivek Goyal25fb5162009-12-03 12:59:46 -05001884}
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02001885
Vivek Goyal25fb5162009-12-03 12:59:46 -05001886static inline void
1887cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1888 cfqq->cfqg = cfqg;
1889}
1890
1891#endif /* GROUP_IOSCHED */
1892
Jens Axboe498d3aa22007-04-26 12:54:48 +02001893/*
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01001894 * The cfqd->service_trees hold all pending cfq_queues that have
Jens Axboe498d3aa22007-04-26 12:54:48 +02001895 * requests waiting to be processed. Each tree is sorted in the order
 1896	 * in which we will service the queues.
1897 */
Jens Axboea36e71f2009-04-15 12:15:11 +02001898static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboea6151c32009-10-07 20:02:57 +02001899 bool add_front)
Jens Axboed9e76202007-04-20 14:27:50 +02001900{
Jens Axboe08717142008-01-28 11:38:15 +01001901 struct rb_node **p, *parent;
1902 struct cfq_queue *__cfqq;
Jens Axboed9e76202007-04-20 14:27:50 +02001903 unsigned long rb_key;
Vivek Goyal34b98d02012-10-03 16:56:58 -04001904 struct cfq_rb_root *st;
Jens Axboe498d3aa22007-04-26 12:54:48 +02001905 int left;
Vivek Goyaldae739e2009-12-03 12:59:45 -05001906 int new_cfqq = 1;
Vivek Goyalae30c282009-12-03 12:59:55 -05001907
Vivek Goyal34b98d02012-10-03 16:56:58 -04001908 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
Jens Axboe08717142008-01-28 11:38:15 +01001909 if (cfq_class_idle(cfqq)) {
1910 rb_key = CFQ_IDLE_DELAY;
Vivek Goyal34b98d02012-10-03 16:56:58 -04001911 parent = rb_last(&st->rb);
Jens Axboe08717142008-01-28 11:38:15 +01001912 if (parent && parent != &cfqq->rb_node) {
1913 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1914 rb_key += __cfqq->rb_key;
1915 } else
1916 rb_key += jiffies;
1917 } else if (!add_front) {
Jens Axboeb9c89462009-10-06 20:53:44 +02001918 /*
1919 * Get our rb key offset. Subtract any residual slice
1920 * value carried from last service. A negative resid
1921 * count indicates slice overrun, and this should position
1922 * the next service time further away in the tree.
1923 */
Jens Axboeedd75ff2007-04-19 12:03:34 +02001924 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
Jens Axboeb9c89462009-10-06 20:53:44 +02001925 rb_key -= cfqq->slice_resid;
Jens Axboeedd75ff2007-04-19 12:03:34 +02001926 cfqq->slice_resid = 0;
Corrado Zoccolo48e025e2009-10-05 08:49:23 +02001927 } else {
1928 rb_key = -HZ;
Vivek Goyal34b98d02012-10-03 16:56:58 -04001929 __cfqq = cfq_rb_first(st);
Corrado Zoccolo48e025e2009-10-05 08:49:23 +02001930 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1931 }
Jens Axboed9e76202007-04-20 14:27:50 +02001932
1933 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
Vivek Goyaldae739e2009-12-03 12:59:45 -05001934 new_cfqq = 0;
Jens Axboe99f96282007-02-05 11:56:25 +01001935 /*
Jens Axboed9e76202007-04-20 14:27:50 +02001936 * same position, nothing more to do
Jens Axboe99f96282007-02-05 11:56:25 +01001937 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04001938 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
Jens Axboed9e76202007-04-20 14:27:50 +02001939 return;
Jens Axboe53b037442006-07-28 09:48:51 +02001940
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01001941 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1942 cfqq->service_tree = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02001943 }
Jens Axboed9e76202007-04-20 14:27:50 +02001944
Jens Axboe498d3aa22007-04-26 12:54:48 +02001945 left = 1;
Jens Axboe08717142008-01-28 11:38:15 +01001946 parent = NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04001947 cfqq->service_tree = st;
1948 p = &st->rb.rb_node;
Jens Axboed9e76202007-04-20 14:27:50 +02001949 while (*p) {
1950 parent = *p;
1951 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1952
Jens Axboe0c534e02007-04-18 20:01:57 +02001953 /*
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01001954 * sort by key, that represents service time.
Jens Axboe0c534e02007-04-18 20:01:57 +02001955 */
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01001956 if (time_before(rb_key, __cfqq->rb_key))
Vivek Goyal1f23f122012-10-03 16:57:00 -04001957 p = &parent->rb_left;
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01001958 else {
Vivek Goyal1f23f122012-10-03 16:57:00 -04001959 p = &parent->rb_right;
Jens Axboecc09e292007-04-26 12:53:50 +02001960 left = 0;
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01001961 }
Jens Axboed9e76202007-04-20 14:27:50 +02001962 }
1963
Jens Axboecc09e292007-04-26 12:53:50 +02001964 if (left)
Vivek Goyal34b98d02012-10-03 16:56:58 -04001965 st->left = &cfqq->rb_node;
Jens Axboecc09e292007-04-26 12:53:50 +02001966
Jens Axboed9e76202007-04-20 14:27:50 +02001967 cfqq->rb_key = rb_key;
1968 rb_link_node(&cfqq->rb_node, parent, p);
Vivek Goyal34b98d02012-10-03 16:56:58 -04001969 rb_insert_color(&cfqq->rb_node, &st->rb);
1970 st->count++;
Namhyung Kim20359f22011-05-24 10:23:22 +02001971 if (add_front || !new_cfqq)
Vivek Goyaldae739e2009-12-03 12:59:45 -05001972 return;
Justin TerAvest8184f932011-03-17 16:12:36 +01001973 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974}
1975
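/*
 * Summary of the rb_key placement rules above (a sketch, values in
 * jiffies relative to "now"):
 *
 *	idle class:  rb_key = last key + CFQ_IDLE_DELAY   (back of tree)
 *	add_front:   rb_key = first key - HZ              (head of tree)
 *	normal:      rb_key = now + slice_offset - slice_resid
 *
 * For example, a queue expired with 40 jiffies of its slice unused
 * carries slice_resid = 40 and re-enters 40 jiffies earlier than a
 * peer with the same slice offset, while a queue that overran its
 * slice has a negative resid and is pushed correspondingly later.
 */
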
Jens Axboea36e71f2009-04-15 12:15:11 +02001976static struct cfq_queue *
Jens Axboef2d1f0a2009-04-23 12:19:38 +02001977cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1978 sector_t sector, struct rb_node **ret_parent,
1979 struct rb_node ***rb_link)
Jens Axboea36e71f2009-04-15 12:15:11 +02001980{
Jens Axboea36e71f2009-04-15 12:15:11 +02001981 struct rb_node **p, *parent;
1982 struct cfq_queue *cfqq = NULL;
1983
1984 parent = NULL;
1985 p = &root->rb_node;
1986 while (*p) {
1987 struct rb_node **n;
1988
1989 parent = *p;
1990 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1991
1992 /*
1993 * Sort strictly based on sector. Smallest to the left,
1994 * largest to the right.
1995 */
Tejun Heo2e46e8b2009-05-07 22:24:41 +09001996 if (sector > blk_rq_pos(cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02001997 n = &(*p)->rb_right;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09001998 else if (sector < blk_rq_pos(cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02001999 n = &(*p)->rb_left;
2000 else
2001 break;
2002 p = n;
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002003 cfqq = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02002004 }
2005
2006 *ret_parent = parent;
2007 if (rb_link)
2008 *rb_link = p;
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002009 return cfqq;
Jens Axboea36e71f2009-04-15 12:15:11 +02002010}
2011
2012static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2013{
Jens Axboea36e71f2009-04-15 12:15:11 +02002014 struct rb_node **p, *parent;
2015 struct cfq_queue *__cfqq;
2016
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002017 if (cfqq->p_root) {
2018 rb_erase(&cfqq->p_node, cfqq->p_root);
2019 cfqq->p_root = NULL;
2020 }
Jens Axboea36e71f2009-04-15 12:15:11 +02002021
2022 if (cfq_class_idle(cfqq))
2023 return;
2024 if (!cfqq->next_rq)
2025 return;
2026
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002027 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002028 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2029 blk_rq_pos(cfqq->next_rq), &parent, &p);
Jens Axboe3ac6c9f2009-04-23 12:14:56 +02002030 if (!__cfqq) {
2031 rb_link_node(&cfqq->p_node, parent, p);
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002032 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2033 } else
2034 cfqq->p_root = NULL;
Jens Axboea36e71f2009-04-15 12:15:11 +02002035}
2036
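/*
 * The prio trees populated above give, per ioprio level, an rbtree of
 * queues keyed by the sector of their next request. A sketch of the
 * lookup they enable (used elsewhere in this file when searching for
 * a cooperating queue issuing I/O close to the current head position):
 *
 *	struct rb_node *parent;
 *	struct cfq_queue *close;
 *
 *	close = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
 *	if (!close && parent)
 *		close = rb_entry(parent, struct cfq_queue, p_node);
 *
 * An exact hit returns the queue directly; otherwise @parent is the
 * last node visited, i.e. a queue working near @sector.
 */
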
Jens Axboe498d3aa22007-04-26 12:54:48 +02002037/*
2038 * Update cfqq's position in the service tree.
2039 */
Jens Axboeedd75ff2007-04-19 12:03:34 +02002040static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002041{
Jens Axboe6d048f52007-04-25 12:44:27 +02002042 /*
2043 * Resorting requires the cfqq to be on the RR list already.
2044 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002045 if (cfq_cfqq_on_rr(cfqq)) {
Jens Axboeedd75ff2007-04-19 12:03:34 +02002046 cfq_service_tree_add(cfqd, cfqq, 0);
Jens Axboea36e71f2009-04-15 12:15:11 +02002047 cfq_prio_tree_add(cfqd, cfqq);
2048 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002049}
2050
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051/*
2052 * add to busy list of queues for service, trying to be fair in ordering
Jens Axboe22e2c502005-06-27 10:55:12 +02002053 * the pending list according to last request service
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 */
Jens Axboefebffd62008-01-28 13:19:43 +01002055static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056{
Jens Axboe7b679132008-05-30 12:23:07 +02002057 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
Jens Axboe3b181522005-06-27 10:56:24 +02002058 BUG_ON(cfq_cfqq_on_rr(cfqq));
2059 cfq_mark_cfqq_on_rr(cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 cfqd->busy_queues++;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002061 if (cfq_cfqq_sync(cfqq))
2062 cfqd->busy_sync_queues++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
Jens Axboeedd75ff2007-04-19 12:03:34 +02002064 cfq_resort_rr_list(cfqd, cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065}
2066
Jens Axboe498d3aa22007-04-26 12:54:48 +02002067/*
2068 * Called when the cfqq no longer has requests pending, remove it from
2069 * the service tree.
2070 */
Jens Axboefebffd62008-01-28 13:19:43 +01002071static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072{
Jens Axboe7b679132008-05-30 12:23:07 +02002073 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
Jens Axboe3b181522005-06-27 10:56:24 +02002074 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2075 cfq_clear_cfqq_on_rr(cfqq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01002077 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2078 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2079 cfqq->service_tree = NULL;
2080 }
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002081 if (cfqq->p_root) {
2082 rb_erase(&cfqq->p_node, cfqq->p_root);
2083 cfqq->p_root = NULL;
2084 }
Jens Axboed9e76202007-04-20 14:27:50 +02002085
Justin TerAvest8184f932011-03-17 16:12:36 +01002086 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 BUG_ON(!cfqd->busy_queues);
2088 cfqd->busy_queues--;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002089 if (cfq_cfqq_sync(cfqq))
2090 cfqd->busy_sync_queues--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091}
2092
2093/*
2094 * rb tree support functions
2095 */
Jens Axboefebffd62008-01-28 13:19:43 +01002096static void cfq_del_rq_rb(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097{
Jens Axboe5e705372006-07-13 12:39:25 +02002098 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe5e705372006-07-13 12:39:25 +02002099 const int sync = rq_is_sync(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100
Jens Axboeb4878f22005-10-20 16:42:29 +02002101 BUG_ON(!cfqq->queued[sync]);
2102 cfqq->queued[sync]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Jens Axboe5e705372006-07-13 12:39:25 +02002104 elv_rb_del(&cfqq->sort_list, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
Vivek Goyalf04a6422009-12-03 12:59:40 -05002106 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2107 /*
2108 * Queue will be deleted from service tree when we actually
2109 * expire it later. Right now just remove it from prio tree
2110 * as it is empty.
2111 */
2112 if (cfqq->p_root) {
2113 rb_erase(&cfqq->p_node, cfqq->p_root);
2114 cfqq->p_root = NULL;
2115 }
2116 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117}
2118
Jens Axboe5e705372006-07-13 12:39:25 +02002119static void cfq_add_rq_rb(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120{
Jens Axboe5e705372006-07-13 12:39:25 +02002121 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 struct cfq_data *cfqd = cfqq->cfqd;
Jeff Moyer796d5112011-06-02 21:19:05 +02002123 struct request *prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
Jens Axboe5380a102006-07-13 12:37:56 +02002125 cfqq->queued[rq_is_sync(rq)]++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
Jeff Moyer796d5112011-06-02 21:19:05 +02002127 elv_rb_add(&cfqq->sort_list, rq);
Jens Axboe5fccbf62006-10-31 14:21:55 +01002128
2129 if (!cfq_cfqq_on_rr(cfqq))
2130 cfq_add_cfqq_rr(cfqd, cfqq);
Jens Axboe5044eed2007-04-25 11:53:48 +02002131
2132 /*
2133 * check if this request is a better next-serve candidate
2134 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002135 prev = cfqq->next_rq;
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002136 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
Jens Axboea36e71f2009-04-15 12:15:11 +02002137
2138 /*
2139 * adjust priority tree position, if ->next_rq changes
2140 */
2141 if (prev != cfqq->next_rq)
2142 cfq_prio_tree_add(cfqd, cfqq);
2143
Jens Axboe5044eed2007-04-25 11:53:48 +02002144 BUG_ON(!cfqq->next_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145}
2146
Jens Axboefebffd62008-01-28 13:19:43 +01002147static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148{
Jens Axboe5380a102006-07-13 12:37:56 +02002149 elv_rb_del(&cfqq->sort_list, rq);
2150 cfqq->queued[rq_is_sync(rq)]--;
Tejun Heo155fead2012-04-01 14:38:44 -07002151 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
Jens Axboe5e705372006-07-13 12:39:25 +02002152 cfq_add_rq_rb(rq);
Tejun Heo155fead2012-04-01 14:38:44 -07002153 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2154 rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155}
2156
Jens Axboe206dc692006-03-28 13:03:44 +02002157static struct request *
2158cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159{
Jens Axboe206dc692006-03-28 13:03:44 +02002160 struct task_struct *tsk = current;
Tejun Heoc5869802011-12-14 00:33:41 +01002161 struct cfq_io_cq *cic;
Jens Axboe206dc692006-03-28 13:03:44 +02002162 struct cfq_queue *cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163
Jens Axboe4ac845a2008-01-24 08:44:49 +01002164 cic = cfq_cic_lookup(cfqd, tsk->io_context);
Vasily Tarasov91fac312007-04-25 12:29:51 +02002165 if (!cic)
2166 return NULL;
2167
2168 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
Jens Axboe89850f72006-07-22 16:48:31 +02002169 if (cfqq) {
2170 sector_t sector = bio->bi_sector + bio_sectors(bio);
2171
Jens Axboe21183b02006-07-13 12:33:14 +02002172 return elv_rb_find(&cfqq->sort_list, sector);
Jens Axboe89850f72006-07-22 16:48:31 +02002173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 return NULL;
2176}
2177
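/*
 * Example of the front-merge lookup above (a sketch): for a bio
 * covering sectors [100, 108) we compute
 *
 *	sector = bio->bi_sector + bio_sectors(bio) = 108
 *
 * and elv_rb_find() searches the queue's sort_list for a request that
 * starts at sector 108; if one exists, the bio can be front-merged
 * onto it and cfq_merge() reports ELEVATOR_FRONT_MERGE.
 */
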
Jens Axboe165125e2007-07-24 09:28:11 +02002178static void cfq_activate_request(struct request_queue *q, struct request *rq)
Jens Axboeb4878f22005-10-20 16:42:29 +02002179{
2180 struct cfq_data *cfqd = q->elevator->elevator_data;
2181
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002182 cfqd->rq_in_driver++;
Jens Axboe7b679132008-05-30 12:23:07 +02002183 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002184 cfqd->rq_in_driver);
Jens Axboe25776e32006-06-01 10:12:26 +02002185
Tejun Heo5b936292009-05-07 22:24:38 +09002186 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02002187}
2188
Jens Axboe165125e2007-07-24 09:28:11 +02002189static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190{
Jens Axboe22e2c502005-06-27 10:55:12 +02002191 struct cfq_data *cfqd = q->elevator->elevator_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002193 WARN_ON(!cfqd->rq_in_driver);
2194 cfqd->rq_in_driver--;
Jens Axboe7b679132008-05-30 12:23:07 +02002195 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002196 cfqd->rq_in_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197}
2198
Jens Axboeb4878f22005-10-20 16:42:29 +02002199static void cfq_remove_request(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200{
Jens Axboe5e705372006-07-13 12:39:25 +02002201 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe21183b02006-07-13 12:33:14 +02002202
Jens Axboe5e705372006-07-13 12:39:25 +02002203 if (cfqq->next_rq == rq)
2204 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Jens Axboeb4878f22005-10-20 16:42:29 +02002206 list_del_init(&rq->queuelist);
Jens Axboe5e705372006-07-13 12:39:25 +02002207 cfq_del_rq_rb(rq);
Jens Axboe374f84a2006-07-23 01:42:19 +02002208
Aaron Carroll45333d52008-08-26 15:52:36 +02002209 cfqq->cfqd->rq_queued--;
Tejun Heo155fead2012-04-01 14:38:44 -07002210 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
Christoph Hellwig65299a32011-08-23 14:50:29 +02002211 if (rq->cmd_flags & REQ_PRIO) {
2212 WARN_ON(!cfqq->prio_pending);
2213 cfqq->prio_pending--;
Jens Axboeb53d1ed2011-08-19 08:34:48 +02002214 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215}
2216
Jens Axboe165125e2007-07-24 09:28:11 +02002217static int cfq_merge(struct request_queue *q, struct request **req,
2218 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219{
2220 struct cfq_data *cfqd = q->elevator->elevator_data;
2221 struct request *__rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
Jens Axboe206dc692006-03-28 13:03:44 +02002223 __rq = cfq_find_rq_fmerge(cfqd, bio);
Jens Axboe22e2c502005-06-27 10:55:12 +02002224 if (__rq && elv_rq_merge_ok(__rq, bio)) {
Jens Axboe98170642006-07-28 09:23:08 +02002225 *req = __rq;
2226 return ELEVATOR_FRONT_MERGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 }
2228
2229 return ELEVATOR_NO_MERGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230}
2231
Jens Axboe165125e2007-07-24 09:28:11 +02002232static void cfq_merged_request(struct request_queue *q, struct request *req,
Jens Axboe21183b02006-07-13 12:33:14 +02002233 int type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234{
Jens Axboe21183b02006-07-13 12:33:14 +02002235 if (type == ELEVATOR_FRONT_MERGE) {
Jens Axboe5e705372006-07-13 12:39:25 +02002236 struct cfq_queue *cfqq = RQ_CFQQ(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
Jens Axboe5e705372006-07-13 12:39:25 +02002238 cfq_reposition_rq_rb(cfqq, req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240}
2241
Divyesh Shah812d4022010-04-08 21:14:23 -07002242static void cfq_bio_merged(struct request_queue *q, struct request *req,
2243 struct bio *bio)
2244{
Tejun Heo155fead2012-04-01 14:38:44 -07002245 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
Divyesh Shah812d4022010-04-08 21:14:23 -07002246}
2247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248static void
Jens Axboe165125e2007-07-24 09:28:11 +02002249cfq_merged_requests(struct request_queue *q, struct request *rq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 struct request *next)
2251{
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002252 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Shaohua Li4a0b75c2011-12-16 14:00:22 +01002253 struct cfq_data *cfqd = q->elevator->elevator_data;
2254
Jens Axboe22e2c502005-06-27 10:55:12 +02002255 /*
2256 * reposition in fifo if next is older than rq
2257 */
2258 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
Shaohua Li3d106fba2012-11-06 12:39:51 +01002259 time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
2260 cfqq == RQ_CFQQ(next)) {
Jens Axboe22e2c502005-06-27 10:55:12 +02002261 list_move(&rq->queuelist, &next->queuelist);
Jens Axboe30996f42009-10-05 11:03:39 +02002262 rq_set_fifo_time(rq, rq_fifo_time(next));
2263 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002264
Corrado Zoccolocf7c25c2009-11-08 17:16:46 +01002265 if (cfqq->next_rq == next)
2266 cfqq->next_rq = rq;
Jens Axboeb4878f22005-10-20 16:42:29 +02002267 cfq_remove_request(next);
Tejun Heo155fead2012-04-01 14:38:44 -07002268 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
Shaohua Li4a0b75c2011-12-16 14:00:22 +01002269
2270 cfqq = RQ_CFQQ(next);
2271 /*
 2272	 * All requests of this queue have been merged into other queues; delete
 2273	 * it from the service tree. If it is the active_queue,
 2274	 * cfq_dispatch_requests() will choose to expire it or keep idling.
2275 */
2276 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2277 cfqq != cfqd->active_queue)
2278 cfq_del_cfqq_rr(cfqd, cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002279}
2280
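/*
 * Example of the fifo repositioning above (a sketch): if rq entered
 * the fifo at T + 50 and next at T + 30, the merged request must not
 * inherit rq's later deadline, so rq is moved into next's list
 * position and rq_set_fifo_time(rq, T + 30) preserves the older
 * expiry.
 */
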
Jens Axboe165125e2007-07-24 09:28:11 +02002281static int cfq_allow_merge(struct request_queue *q, struct request *rq,
Jens Axboeda775262006-12-20 11:04:12 +01002282 struct bio *bio)
2283{
2284 struct cfq_data *cfqd = q->elevator->elevator_data;
Tejun Heoc5869802011-12-14 00:33:41 +01002285 struct cfq_io_cq *cic;
Jens Axboeda775262006-12-20 11:04:12 +01002286 struct cfq_queue *cfqq;
Jens Axboeda775262006-12-20 11:04:12 +01002287
2288 /*
Jens Axboeec8acb62007-01-02 18:32:11 +01002289 * Disallow merge of a sync bio into an async request.
Jens Axboeda775262006-12-20 11:04:12 +01002290 */
Vasily Tarasov91fac312007-04-25 12:29:51 +02002291 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
Jens Axboea6151c32009-10-07 20:02:57 +02002292 return false;
Jens Axboeda775262006-12-20 11:04:12 +01002293
2294 /*
Tejun Heof1a4f4d2011-12-14 00:33:39 +01002295 * Lookup the cfqq that this bio will be queued with and allow
Tejun Heo07c2bd32012-02-08 09:19:42 +01002296 * merge only if rq is queued there.
Jens Axboeda775262006-12-20 11:04:12 +01002297 */
Tejun Heo07c2bd32012-02-08 09:19:42 +01002298 cic = cfq_cic_lookup(cfqd, current->io_context);
2299 if (!cic)
2300 return false;
Jens Axboe719d3402006-12-22 09:38:53 +01002301
Vasily Tarasov91fac312007-04-25 12:29:51 +02002302 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
Jens Axboea6151c32009-10-07 20:02:57 +02002303 return cfqq == RQ_CFQQ(rq);
Jens Axboeda775262006-12-20 11:04:12 +01002304}
2305
Divyesh Shah812df482010-04-08 21:15:35 -07002306static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2307{
2308 del_timer(&cfqd->idle_slice_timer);
Tejun Heo155fead2012-04-01 14:38:44 -07002309 cfqg_stats_update_idle_time(cfqq->cfqg);
Divyesh Shah812df482010-04-08 21:15:35 -07002310}
2311
Jens Axboefebffd62008-01-28 13:19:43 +01002312static void __cfq_set_active_queue(struct cfq_data *cfqd,
2313 struct cfq_queue *cfqq)
Jens Axboe22e2c502005-06-27 10:55:12 +02002314{
2315 if (cfqq) {
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002316 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002317 cfqd->serving_wl_class, cfqd->serving_wl_type);
Tejun Heo155fead2012-04-01 14:38:44 -07002318 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
Justin TerAvest62a37f62011-03-23 08:25:44 +01002319 cfqq->slice_start = 0;
2320 cfqq->dispatch_start = jiffies;
2321 cfqq->allocated_slice = 0;
2322 cfqq->slice_end = 0;
2323 cfqq->slice_dispatch = 0;
2324 cfqq->nr_sectors = 0;
2325
2326 cfq_clear_cfqq_wait_request(cfqq);
2327 cfq_clear_cfqq_must_dispatch(cfqq);
2328 cfq_clear_cfqq_must_alloc_slice(cfqq);
2329 cfq_clear_cfqq_fifo_expire(cfqq);
2330 cfq_mark_cfqq_slice_new(cfqq);
2331
2332 cfq_del_timer(cfqd, cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002333 }
2334
2335 cfqd->active_queue = cfqq;
2336}
2337
2338/*
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002339 * current cfqq expired its slice (or was too idle), select new one
2340 */
2341static void
2342__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Vivek Goyale5ff0822010-04-26 19:25:11 +02002343 bool timed_out)
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002344{
Jens Axboe7b679132008-05-30 12:23:07 +02002345 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2346
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002347 if (cfq_cfqq_wait_request(cfqq))
Divyesh Shah812df482010-04-08 21:15:35 -07002348 cfq_del_timer(cfqd, cfqq);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002349
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002350 cfq_clear_cfqq_wait_request(cfqq);
Vivek Goyalf75edf22009-12-03 12:59:53 -05002351 cfq_clear_cfqq_wait_busy(cfqq);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002352
2353 /*
Shaohua Liae54abe2010-02-05 13:11:45 +01002354 * If this cfqq is shared between multiple processes, check to
2355 * make sure that those processes are still issuing I/Os within
2356 * the mean seek distance. If not, it may be time to break the
2357 * queues apart again.
2358 */
2359 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2360 cfq_mark_cfqq_split_coop(cfqq);
2361
2362 /*
Jens Axboe6084cdd2007-04-23 08:25:00 +02002363 * store what was left of this slice, if the queue idled/timed out
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002364 */
Shaohua Lic553f8e2011-01-14 08:41:03 +01002365 if (timed_out) {
2366 if (cfq_cfqq_slice_new(cfqq))
Vivek Goyalba5bd522011-01-19 08:25:02 -07002367 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
Shaohua Lic553f8e2011-01-14 08:41:03 +01002368 else
2369 cfqq->slice_resid = cfqq->slice_end - jiffies;
Jens Axboe7b679132008-05-30 12:23:07 +02002370 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2371 }
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002372
Vivek Goyale5ff0822010-04-26 19:25:11 +02002373 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
Vivek Goyaldae739e2009-12-03 12:59:45 -05002374
Vivek Goyalf04a6422009-12-03 12:59:40 -05002375 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2376 cfq_del_cfqq_rr(cfqd, cfqq);
2377
Jens Axboeedd75ff2007-04-19 12:03:34 +02002378 cfq_resort_rr_list(cfqd, cfqq);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002379
2380 if (cfqq == cfqd->active_queue)
2381 cfqd->active_queue = NULL;
2382
2383 if (cfqd->active_cic) {
Tejun Heo11a31222012-02-07 07:51:30 +01002384 put_io_context(cfqd->active_cic->icq.ioc);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002385 cfqd->active_cic = NULL;
2386 }
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002387}
2388
Vivek Goyale5ff0822010-04-26 19:25:11 +02002389static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002390{
2391 struct cfq_queue *cfqq = cfqd->active_queue;
2392
2393 if (cfqq)
Vivek Goyale5ff0822010-04-26 19:25:11 +02002394 __cfq_slice_expired(cfqd, cfqq, timed_out);
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002395}
2396
Jens Axboe498d3aa22007-04-26 12:54:48 +02002397/*
2398 * Get next queue for service. Unless we have a queue preemption,
2399 * we'll simply select the first cfqq in the service tree.
2400 */
Jens Axboe6d048f52007-04-25 12:44:27 +02002401static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002402{
Vivek Goyal34b98d02012-10-03 16:56:58 -04002403 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2404 cfqd->serving_wl_class, cfqd->serving_wl_type);
Jens Axboeedd75ff2007-04-19 12:03:34 +02002405
Vivek Goyalf04a6422009-12-03 12:59:40 -05002406 if (!cfqd->rq_queued)
2407 return NULL;
2408
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002409 /* There is nothing to dispatch */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002410 if (!st)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002411 return NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002412 if (RB_EMPTY_ROOT(&st->rb))
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002413 return NULL;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002414 return cfq_rb_first(st);
Jens Axboe6d048f52007-04-25 12:44:27 +02002415}
2416
Vivek Goyalf04a6422009-12-03 12:59:40 -05002417static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2418{
Vivek Goyal25fb5162009-12-03 12:59:46 -05002419 struct cfq_group *cfqg;
Vivek Goyalf04a6422009-12-03 12:59:40 -05002420 struct cfq_queue *cfqq;
2421 int i, j;
2422 struct cfq_rb_root *st;
2423
2424 if (!cfqd->rq_queued)
2425 return NULL;
2426
Vivek Goyal25fb5162009-12-03 12:59:46 -05002427 cfqg = cfq_get_next_cfqg(cfqd);
2428 if (!cfqg)
2429 return NULL;
2430
Vivek Goyalf04a6422009-12-03 12:59:40 -05002431 for_each_cfqg_st(cfqg, i, j, st)
2432 if ((cfqq = cfq_rb_first(st)) != NULL)
2433 return cfqq;
2434 return NULL;
2435}
2436
Jens Axboe498d3aa22007-04-26 12:54:48 +02002437/*
2438 * Get and set a new active queue for service.
2439 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002440static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2441 struct cfq_queue *cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002442{
Jens Axboee00ef792009-11-04 08:54:55 +01002443 if (!cfqq)
Jens Axboea36e71f2009-04-15 12:15:11 +02002444 cfqq = cfq_get_next_queue(cfqd);
Jens Axboe6d048f52007-04-25 12:44:27 +02002445
Jens Axboe22e2c502005-06-27 10:55:12 +02002446 __cfq_set_active_queue(cfqd, cfqq);
Jens Axboe3b181522005-06-27 10:56:24 +02002447 return cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02002448}
2449
Jens Axboed9e76202007-04-20 14:27:50 +02002450static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2451 struct request *rq)
2452{
Tejun Heo83096eb2009-05-07 22:24:39 +09002453 if (blk_rq_pos(rq) >= cfqd->last_position)
2454 return blk_rq_pos(rq) - cfqd->last_position;
Jens Axboed9e76202007-04-20 14:27:50 +02002455 else
Tejun Heo83096eb2009-05-07 22:24:39 +09002456 return cfqd->last_position - blk_rq_pos(rq);
Jens Axboed9e76202007-04-20 14:27:50 +02002457}
2458
Jeff Moyerb2c18e12009-10-23 17:14:49 -04002459static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Shaohua Lie9ce3352010-03-19 08:03:04 +01002460 struct request *rq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002461{
Shaohua Lie9ce3352010-03-19 08:03:04 +01002462 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
Jens Axboe6d048f52007-04-25 12:44:27 +02002463}
2464
Jens Axboea36e71f2009-04-15 12:15:11 +02002465static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2466 struct cfq_queue *cur_cfqq)
Jens Axboe6d048f52007-04-25 12:44:27 +02002467{
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002468 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
Jens Axboea36e71f2009-04-15 12:15:11 +02002469 struct rb_node *parent, *node;
2470 struct cfq_queue *__cfqq;
2471 sector_t sector = cfqd->last_position;
2472
2473 if (RB_EMPTY_ROOT(root))
2474 return NULL;
2475
2476 /*
2477 * First, if we find a request starting at the end of the last
2478 * request, choose it.
2479 */
Jens Axboef2d1f0a2009-04-23 12:19:38 +02002480 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
Jens Axboea36e71f2009-04-15 12:15:11 +02002481 if (__cfqq)
2482 return __cfqq;
2483
2484 /*
2485 * If the exact sector wasn't found, the parent of the NULL leaf
2486 * will contain the closest sector.
2487 */
2488 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
Shaohua Lie9ce3352010-03-19 08:03:04 +01002489 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002490 return __cfqq;
2491
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002492 if (blk_rq_pos(__cfqq->next_rq) < sector)
Jens Axboea36e71f2009-04-15 12:15:11 +02002493 node = rb_next(&__cfqq->p_node);
2494 else
2495 node = rb_prev(&__cfqq->p_node);
2496 if (!node)
2497 return NULL;
2498
2499 __cfqq = rb_entry(node, struct cfq_queue, p_node);
Shaohua Lie9ce3352010-03-19 08:03:04 +01002500 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
Jens Axboea36e71f2009-04-15 12:15:11 +02002501 return __cfqq;
2502
2503 return NULL;
2504}
2505
2506/*
2507 * cfqd - obvious
2508 * cur_cfqq - passed in so that we don't decide that the current queue is
2509 * closely cooperating with itself.
2510 *
	2511	 * So, basically we're assuming that cur_cfqq has dispatched at least
2512 * one request, and that cfqd->last_position reflects a position on the disk
2513 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2514 * assumption.
2515 */
2516static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
Jeff Moyerb3b6d042009-10-23 17:14:51 -04002517 struct cfq_queue *cur_cfqq)
Jens Axboea36e71f2009-04-15 12:15:11 +02002518{
2519 struct cfq_queue *cfqq;
2520
Divyesh Shah39c01b22010-03-25 15:45:57 +01002521 if (cfq_class_idle(cur_cfqq))
2522 return NULL;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002523 if (!cfq_cfqq_sync(cur_cfqq))
2524 return NULL;
2525 if (CFQQ_SEEKY(cur_cfqq))
2526 return NULL;
2527
Jens Axboea36e71f2009-04-15 12:15:11 +02002528 /*
Gui Jianfengb9d8f4c2009-12-08 08:54:17 +01002529 * Don't search priority tree if it's the only queue in the group.
2530 */
2531 if (cur_cfqq->cfqg->nr_cfqq == 1)
2532 return NULL;
2533
2534 /*
Jens Axboed9e76202007-04-20 14:27:50 +02002535	 * We should notice if some of the queues are cooperating, e.g.
2536 * working closely on the same area of the disk. In that case,
2537 * we can group them together and don't waste time idling.
Jens Axboe6d048f52007-04-25 12:44:27 +02002538 */
Jens Axboea36e71f2009-04-15 12:15:11 +02002539 cfqq = cfqq_close(cfqd, cur_cfqq);
2540 if (!cfqq)
2541 return NULL;
2542
Vivek Goyal8682e1f2009-12-03 12:59:50 -05002543 /* If new queue belongs to different cfq_group, don't choose it */
2544 if (cur_cfqq->cfqg != cfqq->cfqg)
2545 return NULL;
2546
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002547 /*
2548 * It only makes sense to merge sync queues.
2549 */
2550 if (!cfq_cfqq_sync(cfqq))
2551 return NULL;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002552 if (CFQQ_SEEKY(cfqq))
2553 return NULL;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002554
Corrado Zoccoloc0324a02009-10-27 19:16:03 +01002555 /*
2556 * Do not merge queues of different priority classes
2557 */
2558 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2559 return NULL;
2560
Jens Axboea36e71f2009-04-15 12:15:11 +02002561 return cfqq;
Jens Axboe6d048f52007-04-25 12:44:27 +02002562}
2563
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002564/*
2565 * Determine whether we should enforce idle window for this queue.
2566 */
2567
2568static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2569{
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002570 enum wl_class_t wl_class = cfqq_class(cfqq);
Vivek Goyal34b98d02012-10-03 16:56:58 -04002571 struct cfq_rb_root *st = cfqq->service_tree;
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002572
Vivek Goyal34b98d02012-10-03 16:56:58 -04002573 BUG_ON(!st);
2574 BUG_ON(!st->count);
Vivek Goyalf04a6422009-12-03 12:59:40 -05002575
Vivek Goyalb6508c12010-08-23 12:23:33 +02002576 if (!cfqd->cfq_slice_idle)
2577 return false;
2578
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002579	 /* We never idle for idle class queues. */
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002580 if (wl_class == IDLE_WORKLOAD)
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002581 return false;
2582
	2583	 /* We idle for queues that were marked with the idle window flag. */
Shaohua Li3c764b72009-12-04 13:12:06 +01002584 if (cfq_cfqq_idle_window(cfqq) &&
2585 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002586 return true;
2587
2588 /*
	2589	 * Otherwise, we idle only if the queue is the last one
	2590	 * in its service tree.
2591 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002592 if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2593 !cfq_io_thinktime_big(cfqd, &st->ttime, false))
Shaohua Lic1e44752010-11-08 15:01:02 +01002594 return true;
Vivek Goyal34b98d02012-10-03 16:56:58 -04002595 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
Shaohua Lic1e44752010-11-08 15:01:02 +01002596 return false;
Corrado Zoccoloa6d44e92009-10-26 22:45:11 +01002597}
2598
Jens Axboe6d048f52007-04-25 12:44:27 +02002599static void cfq_arm_slice_timer(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002600{
Jens Axboe17926692007-01-19 11:59:30 +11002601 struct cfq_queue *cfqq = cfqd->active_queue;
Tejun Heoc5869802011-12-14 00:33:41 +01002602 struct cfq_io_cq *cic;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002603 unsigned long sl, group_idle = 0;
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002604
Jens Axboea68bbdd2008-09-24 13:03:33 +02002605 /*
Jens Axboef7d7b7a2008-09-25 11:37:50 +02002606 * SSD device without seek penalty, disable idling. But only do so
2607 * for devices that support queuing, otherwise we still have a problem
2608 * with sync vs async workloads.
Jens Axboea68bbdd2008-09-24 13:03:33 +02002609 */
Jens Axboef7d7b7a2008-09-25 11:37:50 +02002610 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
Jens Axboea68bbdd2008-09-24 13:03:33 +02002611 return;
2612
Jens Axboedd67d052006-06-21 09:36:18 +02002613 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
Jens Axboe6d048f52007-04-25 12:44:27 +02002614 WARN_ON(cfq_cfqq_slice_new(cfqq));
Jens Axboe22e2c502005-06-27 10:55:12 +02002615
2616 /*
2617 * idle is disabled, either manually or by past process history
2618 */
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002619 if (!cfq_should_idle(cfqd, cfqq)) {
2620 /* no queue idling. Check for group idling */
2621 if (cfqd->cfq_group_idle)
2622 group_idle = cfqd->cfq_group_idle;
2623 else
2624 return;
2625 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002626
Jens Axboe22e2c502005-06-27 10:55:12 +02002627 /*
Corrado Zoccolo8e550632009-11-26 10:02:58 +01002628 * still active requests from this queue, don't idle
Jens Axboe7b679132008-05-30 12:23:07 +02002629 */
Corrado Zoccolo8e550632009-11-26 10:02:58 +01002630 if (cfqq->dispatched)
Jens Axboe7b679132008-05-30 12:23:07 +02002631 return;
2632
2633 /*
Jens Axboe22e2c502005-06-27 10:55:12 +02002634 * task has exited, don't wait
2635 */
Jens Axboe206dc692006-03-28 13:03:44 +02002636 cic = cfqd->active_cic;
Tejun Heof6e8d012012-03-05 13:15:26 -08002637 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
Jens Axboe6d048f52007-04-25 12:44:27 +02002638 return;
2639
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002640 /*
2641 * If our average think time is larger than the remaining time
2642 * slice, then don't idle. This avoids overrunning the allotted
2643 * time slice.
2644 */
Shaohua Li383cd722011-07-12 14:24:35 +02002645 if (sample_valid(cic->ttime.ttime_samples) &&
2646 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
Joe Perchesfd16d262011-06-13 10:42:49 +02002647 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
Shaohua Li383cd722011-07-12 14:24:35 +02002648 cic->ttime.ttime_mean);
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002649 return;
Divyesh Shahb1ffe732010-03-25 15:45:03 +01002650 }
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002651
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002652 /* There are other queues in the group, don't do group idle */
2653 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2654 return;
2655
Jens Axboe3b181522005-06-27 10:56:24 +02002656 cfq_mark_cfqq_wait_request(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002657
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002658 if (group_idle)
2659 sl = cfqd->cfq_group_idle;
2660 else
2661 sl = cfqd->cfq_slice_idle;
Jens Axboe206dc692006-03-28 13:03:44 +02002662
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002663 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
Tejun Heo155fead2012-04-01 14:38:44 -07002664 cfqg_stats_set_start_idle_time(cfqq->cfqg);
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002665 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2666 group_idle ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667}
2668
Jens Axboe498d3aa22007-04-26 12:54:48 +02002669/*
2670 * Move request from internal lists to the request queue dispatch list.
2671 */
Jens Axboe165125e2007-07-24 09:28:11 +02002672static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673{
Jens Axboe3ed9a292007-04-23 08:33:33 +02002674 struct cfq_data *cfqd = q->elevator->elevator_data;
Jens Axboe5e705372006-07-13 12:39:25 +02002675 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002676
Jens Axboe7b679132008-05-30 12:23:07 +02002677 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2678
Jeff Moyer06d21882009-09-11 17:08:59 +02002679 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
Jens Axboe5380a102006-07-13 12:37:56 +02002680 cfq_remove_request(rq);
Jens Axboe6d048f52007-04-25 12:44:27 +02002681 cfqq->dispatched++;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002682 (RQ_CFQG(rq))->dispatched++;
Jens Axboe5380a102006-07-13 12:37:56 +02002683 elv_dispatch_sort(q, rq);
Jens Axboe3ed9a292007-04-23 08:33:33 +02002684
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002685 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
Vivek Goyalc4e78932010-08-23 12:25:03 +02002686 cfqq->nr_sectors += blk_rq_sectors(rq);
Tejun Heo155fead2012-04-01 14:38:44 -07002687 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688}
2689
2690/*
2691 * return expired entry, or NULL to just start from scratch in rbtree
2692 */
Jens Axboefebffd62008-01-28 13:19:43 +01002693static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694{
Jens Axboe30996f42009-10-05 11:03:39 +02002695 struct request *rq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696
Jens Axboe3b181522005-06-27 10:56:24 +02002697 if (cfq_cfqq_fifo_expire(cfqq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698 return NULL;
Jens Axboecb887412007-01-19 12:01:16 +11002699
2700 cfq_mark_cfqq_fifo_expire(cfqq);
2701
Jens Axboe89850f72006-07-22 16:48:31 +02002702 if (list_empty(&cfqq->fifo))
2703 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704
Jens Axboe89850f72006-07-22 16:48:31 +02002705 rq = rq_entry_fifo(cfqq->fifo.next);
Jens Axboe30996f42009-10-05 11:03:39 +02002706 if (time_before(jiffies, rq_fifo_time(rq)))
Jens Axboe7b679132008-05-30 12:23:07 +02002707 rq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708
Jens Axboe30996f42009-10-05 11:03:39 +02002709 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
Jens Axboe6d048f52007-04-25 12:44:27 +02002710 return rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711}
2712
Jens Axboe22e2c502005-06-27 10:55:12 +02002713static inline int
2714cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2715{
2716 const int base_rq = cfqd->cfq_slice_async_rq;
2717
2718 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2719
Namhyung Kimb9f8ce02011-05-24 10:23:21 +02002720 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02002721}
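/*
 * Worked example (a sketch, assuming the default cfq_slice_async_rq of 2
 * and IOPRIO_BE_NR of 8): the per-round request budget scales linearly
 * with priority:
 *
 *	ioprio 0 (highest): 2 * 2 * (8 - 0) = 32 requests
 *	ioprio 4 (default): 2 * 2 * (8 - 4) = 16 requests
 *	ioprio 7 (lowest):  2 * 2 * (8 - 7) =  4 requests
 */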
2722
2723/*
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002724 * Must be called with the queue_lock held.
2725 */
2726static int cfqq_process_refs(struct cfq_queue *cfqq)
2727{
2728 int process_refs, io_refs;
2729
2730 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
Shaohua Li30d7b942011-01-07 08:46:59 +01002731 process_refs = cfqq->ref - io_refs;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002732 BUG_ON(process_refs < 0);
2733 return process_refs;
2734}
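/*
 * Illustrative note: cfqq->ref counts both process references and one
 * reference per allocated request, so a cfqq with ref == 5 and three
 * allocated requests (allocated[READ] + allocated[WRITE] == 3) has
 * 5 - 3 == 2 process references.
 */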
2735
2736static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2737{
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002738 int process_refs, new_process_refs;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002739 struct cfq_queue *__cfqq;
2740
Jeff Moyerc10b61f2010-06-17 10:19:11 -04002741 /*
2742 * If there are no process references on the new_cfqq, then it is
2743 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2744 * chain may have dropped their last reference (not just their
2745 * last process reference).
2746 */
2747 if (!cfqq_process_refs(new_cfqq))
2748 return;
2749
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002750 /* Avoid a circular list and skip interim queue merges */
2751 while ((__cfqq = new_cfqq->new_cfqq)) {
2752 if (__cfqq == cfqq)
2753 return;
2754 new_cfqq = __cfqq;
2755 }
2756
2757 process_refs = cfqq_process_refs(cfqq);
Jeff Moyerc10b61f2010-06-17 10:19:11 -04002758 new_process_refs = cfqq_process_refs(new_cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002759 /*
2760 * If the process for the cfqq has gone away, there is no
2761 * sense in merging the queues.
2762 */
Jeff Moyerc10b61f2010-06-17 10:19:11 -04002763 if (process_refs == 0 || new_process_refs == 0)
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002764 return;
2765
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002766 /*
2767 * Merge in the direction of the lesser amount of work.
2768 */
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002769 if (new_process_refs >= process_refs) {
2770 cfqq->new_cfqq = new_cfqq;
Shaohua Li30d7b942011-01-07 08:46:59 +01002771 new_cfqq->ref += process_refs;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002772 } else {
2773 new_cfqq->new_cfqq = cfqq;
Shaohua Li30d7b942011-01-07 08:46:59 +01002774 cfqq->ref += new_process_refs;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002775 }
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002776}
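/*
 * Worked example of the merge direction above: if cfqq has 3 process
 * references and new_cfqq has 1, then new_process_refs < process_refs,
 * so new_cfqq->new_cfqq is pointed at cfqq and cfqq->ref grows by 1.
 * Future lookups that land on new_cfqq thus migrate to cfqq, the queue
 * with more processes (and hence more pending work) behind it.
 */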
2777
Vivek Goyal6d816ec2012-10-03 16:56:59 -04002778static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002779 struct cfq_group *cfqg, enum wl_class_t wl_class)
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002780{
2781 struct cfq_queue *queue;
2782 int i;
2783 bool key_valid = false;
2784 unsigned long lowest_key = 0;
2785 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2786
Vivek Goyal65b32a52009-12-16 17:52:59 -05002787 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2788 /* select the one with lowest rb_key */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002789 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002790 if (queue &&
2791 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2792 lowest_key = queue->rb_key;
2793 cur_best = i;
2794 key_valid = true;
2795 }
2796 }
2797
2798 return cur_best;
2799}
2800
Vivek Goyal6d816ec2012-10-03 16:56:59 -04002801static void
2802choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002803{
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002804 unsigned slice;
2805 unsigned count;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002806 struct cfq_rb_root *st;
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002807 unsigned group_slice;
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002808 enum wl_class_t original_class = cfqd->serving_wl_class;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002809
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002810 /* Choose next priority. RT > BE > IDLE */
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002811 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002812 cfqd->serving_wl_class = RT_WORKLOAD;
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002813 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002814 cfqd->serving_wl_class = BE_WORKLOAD;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002815 else {
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002816 cfqd->serving_wl_class = IDLE_WORKLOAD;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002817 cfqd->workload_expires = jiffies + 1;
2818 return;
2819 }
2820
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002821 if (original_class != cfqd->serving_wl_class)
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01002822 goto new_workload;
2823
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002824 /*
	2825	 * For RT and BE, we also have to choose the type
	2826	 * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
	2827	 * expiration time.
2828 */
Vivek Goyal34b98d02012-10-03 16:56:58 -04002829 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002830 count = st->count;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002831
2832 /*
Vivek Goyal65b32a52009-12-16 17:52:59 -05002833 * check workload expiration, and that we still have other queues ready
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002834 */
Vivek Goyal65b32a52009-12-16 17:52:59 -05002835 if (count && !time_after(jiffies, cfqd->workload_expires))
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002836 return;
2837
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01002838new_workload:
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002839 /* otherwise select new workload type */
Vivek Goyal6d816ec2012-10-03 16:56:59 -04002840 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002841 cfqd->serving_wl_class);
Vivek Goyal34b98d02012-10-03 16:56:58 -04002842 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002843 count = st->count;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002844
2845 /*
2846 * the workload slice is computed as a fraction of target latency
2847 * proportional to the number of queues in that workload, over
2848 * all the queues in the same priority class
2849 */
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002850 group_slice = cfq_group_slice(cfqd, cfqg);
2851
2852 slice = group_slice * count /
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002853 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2854 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
Vivek Goyal3bf10fe2012-10-03 16:56:56 -04002855 cfqg));
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002856
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002857 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05002858 unsigned int tmp;
2859
2860 /*
2861 * Async queues are currently system wide. Just taking
	2862	 * the proportion of queues within the same group will lead to a higher
	2863	 * async ratio system wide, as the root group generally has a
	2864	 * higher weight. A more accurate approach would be to
	2865	 * calculate the system-wide async/sync ratio.
2866 */
Tao Ma5bf14c02012-04-01 14:33:39 -07002867 tmp = cfqd->cfq_target_latency *
2868 cfqg_busy_async_queues(cfqd, cfqg);
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05002869 tmp = tmp/cfqd->busy_queues;
2870 slice = min_t(unsigned, slice, tmp);
2871
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002872 /* async workload slice is scaled down according to
2873 * the sync/async slice ratio. */
2874 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05002875 } else
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002876 /* sync workload slice is at least 2 * cfq_slice_idle */
2877 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2878
2879 slice = max_t(unsigned, slice, CFQ_MIN_TT);
Divyesh Shahb1ffe732010-03-25 15:45:03 +01002880 cfq_log(cfqd, "workload slice:%d", slice);
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002881 cfqd->workload_expires = jiffies + slice;
2882}
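/*
 * Worked example for the slice computation above (a sketch, assuming
 * HZ=1000 and the default 300 ms target latency): if this group owns
 * the whole latency budget (group_slice = 300 ms), the serving class
 * has 4 busy queues and the chosen workload type holds 2 of them, then
 * slice = 300 * 2 / 4 = 150 ms. A sync workload is then raised to at
 * least 2 * cfq_slice_idle, while an async workload is scaled down
 * further by the sync/async slice ratio.
 */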
2883
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002884static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2885{
2886 struct cfq_rb_root *st = &cfqd->grp_service_tree;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05002887 struct cfq_group *cfqg;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002888
2889 if (RB_EMPTY_ROOT(&st->rb))
2890 return NULL;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05002891 cfqg = cfq_rb_first_group(st);
Vivek Goyal25bc6b02009-12-03 12:59:43 -05002892 update_min_vdisktime(st);
2893 return cfqg;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002894}
2895
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002896static void cfq_choose_cfqg(struct cfq_data *cfqd)
2897{
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002898 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2899
2900 cfqd->serving_group = cfqg;
Vivek Goyaldae739e2009-12-03 12:59:45 -05002901
2902 /* Restore the workload type data */
Vivek Goyal4d2ceea2012-10-03 16:56:57 -04002903 if (cfqg->saved_wl_slice) {
2904 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
2905 cfqd->serving_wl_type = cfqg->saved_wl_type;
2906 cfqd->serving_wl_class = cfqg->saved_wl_class;
Gui Jianfeng66ae2912009-12-15 10:08:45 +01002907 } else
2908 cfqd->workload_expires = jiffies - 1;
2909
Vivek Goyal6d816ec2012-10-03 16:56:59 -04002910 choose_wl_class_and_type(cfqd, cfqg);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002911}
2912
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002913/*
Jens Axboe498d3aa22007-04-26 12:54:48 +02002914 * Select a queue for service. If we have a current active queue,
2915 * check whether to continue servicing it, or retrieve and set a new one.
Jens Axboe22e2c502005-06-27 10:55:12 +02002916 */
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01002917static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002918{
Jens Axboea36e71f2009-04-15 12:15:11 +02002919 struct cfq_queue *cfqq, *new_cfqq = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02002920
2921 cfqq = cfqd->active_queue;
2922 if (!cfqq)
2923 goto new_queue;
2924
Vivek Goyalf04a6422009-12-03 12:59:40 -05002925 if (!cfqd->rq_queued)
2926 return NULL;
Vivek Goyalc244bb52009-12-08 17:52:57 -05002927
2928 /*
	2929	 * We were waiting for the group to get backlogged. Expire the queue.
2930 */
2931 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2932 goto expire;
2933
Jens Axboe22e2c502005-06-27 10:55:12 +02002934 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02002935	 * The active queue has run out of time, expire it and select a new one.
Jens Axboe22e2c502005-06-27 10:55:12 +02002936 */
Vivek Goyal7667aa02009-12-08 17:52:58 -05002937 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2938 /*
	2939	 * If the slice had not expired at the completion of the last request,
	2940	 * we might not have turned on the wait_busy flag. Don't expire
2941 * the queue yet. Allow the group to get backlogged.
2942 *
	2943	 * The very fact that we have used the slice means that we
2944 * have been idling all along on this queue and it should be
2945 * ok to wait for this request to complete.
2946 */
Vivek Goyal82bbbf22009-12-10 19:25:41 +01002947 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2948 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2949 cfqq = NULL;
Vivek Goyal7667aa02009-12-08 17:52:58 -05002950 goto keep_queue;
Vivek Goyal82bbbf22009-12-10 19:25:41 +01002951 } else
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002952 goto check_group_idle;
Vivek Goyal7667aa02009-12-08 17:52:58 -05002953 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002954
2955 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02002956 * The active queue has requests and isn't expired, allow it to
2957 * dispatch.
Jens Axboe22e2c502005-06-27 10:55:12 +02002958 */
Jens Axboedd67d052006-06-21 09:36:18 +02002959 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
Jens Axboe22e2c502005-06-27 10:55:12 +02002960 goto keep_queue;
Jens Axboe6d048f52007-04-25 12:44:27 +02002961
2962 /*
Jens Axboea36e71f2009-04-15 12:15:11 +02002963 * If another queue has a request waiting within our mean seek
2964 * distance, let it run. The expire code will check for close
2965 * cooperators and put the close queue at the front of the service
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002966 * tree. If possible, merge the expiring queue with the new cfqq.
Jens Axboea36e71f2009-04-15 12:15:11 +02002967 */
Jeff Moyerb3b6d042009-10-23 17:14:51 -04002968 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002969 if (new_cfqq) {
2970 if (!cfqq->new_cfqq)
2971 cfq_setup_merge(cfqq, new_cfqq);
Jens Axboea36e71f2009-04-15 12:15:11 +02002972 goto expire;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002973 }
Jens Axboea36e71f2009-04-15 12:15:11 +02002974
2975 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02002976 * No requests pending. If the active queue still has requests in
2977 * flight or is idling for a new request, allow either of these
2978 * conditions to happen (or time out) before selecting a new queue.
2979 */
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002980 if (timer_pending(&cfqd->idle_slice_timer)) {
2981 cfqq = NULL;
2982 goto keep_queue;
2983 }
2984
Shaohua Li8e1ac662010-11-08 15:01:04 +01002985 /*
2986 * This is a deep seek queue, but the device is much faster than
	2987	 * the queue can deliver; don't idle.
	2988	 */
2989 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2990 (cfq_cfqq_slice_new(cfqq) ||
2991 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2992 cfq_clear_cfqq_deep(cfqq);
2993 cfq_clear_cfqq_idle_window(cfqq);
2994 }
2995
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002996 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2997 cfqq = NULL;
2998 goto keep_queue;
2999 }
3000
3001 /*
3002 * If group idle is enabled and there are requests dispatched from
3003 * this group, wait for requests to complete.
3004 */
3005check_group_idle:
Shaohua Li7700fc42011-07-12 14:24:56 +02003006 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3007 cfqq->cfqg->dispatched &&
3008 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
Jens Axboecaaa5f92006-06-16 11:23:00 +02003009 cfqq = NULL;
3010 goto keep_queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02003011 }
3012
Jens Axboe3b181522005-06-27 10:56:24 +02003013expire:
Vivek Goyale5ff0822010-04-26 19:25:11 +02003014 cfq_slice_expired(cfqd, 0);
Jens Axboe3b181522005-06-27 10:56:24 +02003015new_queue:
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003016 /*
3017 * Current queue expired. Check if we have to switch to a new
3018 * service tree
3019 */
3020 if (!new_cfqq)
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003021 cfq_choose_cfqg(cfqd);
Corrado Zoccolo718eee02009-10-26 22:45:29 +01003022
Jens Axboea36e71f2009-04-15 12:15:11 +02003023 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003024keep_queue:
Jens Axboe3b181522005-06-27 10:56:24 +02003025 return cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02003026}
3027
Jens Axboefebffd62008-01-28 13:19:43 +01003028static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
Jens Axboed9e76202007-04-20 14:27:50 +02003029{
3030 int dispatched = 0;
3031
3032 while (cfqq->next_rq) {
3033 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3034 dispatched++;
3035 }
3036
3037 BUG_ON(!list_empty(&cfqq->fifo));
Vivek Goyalf04a6422009-12-03 12:59:40 -05003038
3039 /* By default cfqq is not expired if it is empty. Do it explicitly */
Vivek Goyale5ff0822010-04-26 19:25:11 +02003040 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
Jens Axboed9e76202007-04-20 14:27:50 +02003041 return dispatched;
3042}
3043
Jens Axboe498d3aa22007-04-26 12:54:48 +02003044/*
3045 * Drain our current requests. Used for barriers and when switching
3046 * io schedulers on-the-fly.
3047 */
Jens Axboed9e76202007-04-20 14:27:50 +02003048static int cfq_forced_dispatch(struct cfq_data *cfqd)
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003049{
Jens Axboe08717142008-01-28 11:38:15 +01003050 struct cfq_queue *cfqq;
Jens Axboed9e76202007-04-20 14:27:50 +02003051 int dispatched = 0;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003052
Divyesh Shah3440c492010-04-09 09:29:57 +02003053 /* Expire the timeslice of the current active queue first */
Vivek Goyale5ff0822010-04-26 19:25:11 +02003054 cfq_slice_expired(cfqd, 0);
Divyesh Shah3440c492010-04-09 09:29:57 +02003055 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3056 __cfq_set_active_queue(cfqd, cfqq);
Vivek Goyalf04a6422009-12-03 12:59:40 -05003057 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
Divyesh Shah3440c492010-04-09 09:29:57 +02003058 }
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003059
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003060 BUG_ON(cfqd->busy_queues);
3061
Jeff Moyer69237152009-06-12 15:29:30 +02003062 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01003063 return dispatched;
3064}
3065
Shaohua Liabc3c742010-03-01 09:20:54 +01003066static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3067 struct cfq_queue *cfqq)
3068{
3069 /* the queue hasn't finished any request, can't estimate */
3070 if (cfq_cfqq_slice_new(cfqq))
Shaohua Lic1e44752010-11-08 15:01:02 +01003071 return true;
Shaohua Liabc3c742010-03-01 09:20:54 +01003072 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3073 cfqq->slice_end))
Shaohua Lic1e44752010-11-08 15:01:02 +01003074 return true;
Shaohua Liabc3c742010-03-01 09:20:54 +01003075
Shaohua Lic1e44752010-11-08 15:01:02 +01003076 return false;
Shaohua Liabc3c742010-03-01 09:20:54 +01003077}
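/*
 * Illustrative note: the estimate above charges one cfq_slice_idle per
 * dispatched request. E.g. with the default 8 ms idle time and 3
 * requests in flight, the queue counts as "used soon" once less than
 * 24 ms of its slice remains.
 */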
3078
Jens Axboe0b182d62009-10-06 20:49:37 +02003079static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe2f5cb732009-04-07 08:51:19 +02003080{
Jens Axboe2f5cb732009-04-07 08:51:19 +02003081 unsigned int max_dispatch;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082
Jens Axboe2f5cb732009-04-07 08:51:19 +02003083 /*
Jens Axboe5ad531d2009-07-03 12:57:48 +02003084 * Drain async requests before we start sync IO
3085 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003086 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
Jens Axboe0b182d62009-10-06 20:49:37 +02003087 return false;
Jens Axboe5ad531d2009-07-03 12:57:48 +02003088
3089 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02003090 * If this is an async queue and we have sync IO in flight, let it wait
3091 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003092 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02003093 return false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02003094
Shaohua Liabc3c742010-03-01 09:20:54 +01003095 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003096 if (cfq_class_idle(cfqq))
3097 max_dispatch = 1;
3098
3099 /*
3100 * Does this cfqq already have too much IO in flight?
3101 */
3102 if (cfqq->dispatched >= max_dispatch) {
Shaohua Lief8a41d2011-03-07 09:26:29 +01003103 bool promote_sync = false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02003104 /*
3105 * idle queue must always only have a single IO in flight
3106 */
Jens Axboe3ed9a292007-04-23 08:33:33 +02003107 if (cfq_class_idle(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02003108 return false;
Jens Axboe3ed9a292007-04-23 08:33:33 +02003109
Jens Axboe2f5cb732009-04-07 08:51:19 +02003110 /*
Li, Shaohuac4ade942011-03-23 08:30:34 +01003111	 * If there is only one sync queue,
	3112	 * we can ignore the async queue here and give the sync
Shaohua Lief8a41d2011-03-07 09:26:29 +01003113	 * queue no dispatch limit, because a sync queue can
	3114	 * preempt an async queue; limiting the sync queue doesn't make
	3115	 * sense. This is useful for the aiostress test.
3116 */
Li, Shaohuac4ade942011-03-23 08:30:34 +01003117 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3118 promote_sync = true;
Shaohua Lief8a41d2011-03-07 09:26:29 +01003119
3120 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02003121 * We have other queues, don't allow more IO from this one
3122 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01003123 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3124 !promote_sync)
Jens Axboe0b182d62009-10-06 20:49:37 +02003125 return false;
Jens Axboe9ede2092007-01-19 12:11:44 +11003126
Jens Axboe2f5cb732009-04-07 08:51:19 +02003127 /*
Shaohua Li474b18c2009-12-03 12:58:05 +01003128 * Sole queue user, no limit
Vivek Goyal365722b2009-10-03 15:21:27 +02003129 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01003130 if (cfqd->busy_queues == 1 || promote_sync)
Shaohua Liabc3c742010-03-01 09:20:54 +01003131 max_dispatch = -1;
3132 else
3133 /*
3134 * Normally we start throttling cfqq when cfq_quantum/2
3135 * requests have been dispatched. But we can drive
	3136	 * deeper queue depths at the beginning of the slice,
	3137	 * subject to the upper limit of cfq_quantum.
	3138	 */
3139 max_dispatch = cfqd->cfq_quantum;
Jens Axboe8e296752009-10-03 16:26:03 +02003140 }
3141
3142 /*
	3143	 * Async queues must wait a bit before being allowed to dispatch.
3144 * We also ramp up the dispatch depth gradually for async IO,
3145 * based on the last sync IO we serviced
3146 */
Jens Axboe963b72f2009-10-03 19:42:18 +02003147 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
Corrado Zoccolo573412b2009-12-06 11:48:52 +01003148 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
Jens Axboe8e296752009-10-03 16:26:03 +02003149 unsigned int depth;
Vivek Goyal365722b2009-10-03 15:21:27 +02003150
Jens Axboe61f0c1d2009-10-03 19:46:03 +02003151 depth = last_sync / cfqd->cfq_slice[1];
Jens Axboee00c54c2009-10-04 20:36:19 +02003152 if (!depth && !cfqq->dispatched)
3153 depth = 1;
Jens Axboe8e296752009-10-03 16:26:03 +02003154 if (depth < max_dispatch)
3155 max_dispatch = depth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 }
3157
Jens Axboe0b182d62009-10-06 20:49:37 +02003158 /*
3159 * If we're below the current max, allow a dispatch
3160 */
3161 return cfqq->dispatched < max_dispatch;
3162}
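/*
 * Worked example of the async ramp-up above (a sketch, assuming HZ=1000
 * and the default 100 ms sync slice): if the last delayed sync request
 * completed 250 ms ago, depth = 250 / 100 = 2, so an async queue may
 * keep at most two requests in flight. Right after sync I/O the depth
 * collapses back towards a single request.
 */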
3163
3164/*
	3165	 * Dispatch a request from cfqq, moving it to the request queue
3166 * dispatch list.
3167 */
3168static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3169{
3170 struct request *rq;
3171
3172 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3173
3174 if (!cfq_may_dispatch(cfqd, cfqq))
3175 return false;
3176
3177 /*
	3178	 * follow the expired path, else take the next available request
3179 */
3180 rq = cfq_check_fifo(cfqq);
3181 if (!rq)
3182 rq = cfqq->next_rq;
3183
3184 /*
3185 * insert request into driver dispatch list
3186 */
3187 cfq_dispatch_insert(cfqd->queue, rq);
3188
3189 if (!cfqd->active_cic) {
Tejun Heoc5869802011-12-14 00:33:41 +01003190 struct cfq_io_cq *cic = RQ_CIC(rq);
Jens Axboe0b182d62009-10-06 20:49:37 +02003191
Tejun Heoc5869802011-12-14 00:33:41 +01003192 atomic_long_inc(&cic->icq.ioc->refcount);
Jens Axboe0b182d62009-10-06 20:49:37 +02003193 cfqd->active_cic = cic;
3194 }
3195
3196 return true;
3197}
3198
3199/*
3200 * Find the cfqq that we need to service and move a request from that to the
3201 * dispatch list
3202 */
3203static int cfq_dispatch_requests(struct request_queue *q, int force)
3204{
3205 struct cfq_data *cfqd = q->elevator->elevator_data;
3206 struct cfq_queue *cfqq;
3207
3208 if (!cfqd->busy_queues)
3209 return 0;
3210
3211 if (unlikely(force))
3212 return cfq_forced_dispatch(cfqd);
3213
3214 cfqq = cfq_select_queue(cfqd);
3215 if (!cfqq)
Jens Axboe8e296752009-10-03 16:26:03 +02003216 return 0;
3217
Jens Axboe2f5cb732009-04-07 08:51:19 +02003218 /*
Jens Axboe0b182d62009-10-06 20:49:37 +02003219 * Dispatch a request from this cfqq, if it is allowed
Jens Axboe2f5cb732009-04-07 08:51:19 +02003220 */
Jens Axboe0b182d62009-10-06 20:49:37 +02003221 if (!cfq_dispatch_request(cfqd, cfqq))
3222 return 0;
3223
Jens Axboe2f5cb732009-04-07 08:51:19 +02003224 cfqq->slice_dispatch++;
Jens Axboeb0291952009-04-07 11:38:31 +02003225 cfq_clear_cfqq_must_dispatch(cfqq);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003226
3227 /*
	3228	 * expire an async queue immediately if it has used up its slice. An idle
	3229	 * queue always expires after 1 dispatch round.
3230 */
3231 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3232 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3233 cfq_class_idle(cfqq))) {
3234 cfqq->slice_end = jiffies + 1;
Vivek Goyale5ff0822010-04-26 19:25:11 +02003235 cfq_slice_expired(cfqd, 0);
Jens Axboe2f5cb732009-04-07 08:51:19 +02003236 }
3237
Shan Weib217a902009-09-01 10:06:42 +02003238 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
Jens Axboe2f5cb732009-04-07 08:51:19 +02003239 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240}
3241
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242/*
Jens Axboe5e705372006-07-13 12:39:25 +02003243 * task holds one reference to the queue, dropped when task exits. each rq
3244 * in-flight on this queue also holds a reference, dropped when rq is freed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245 *
Vivek Goyalb1c35762009-12-03 12:59:47 -05003246 * Each cfq queue took a reference on the parent group. Drop it now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 * queue lock must be held here.
3248 */
3249static void cfq_put_queue(struct cfq_queue *cfqq)
3250{
Jens Axboe22e2c502005-06-27 10:55:12 +02003251 struct cfq_data *cfqd = cfqq->cfqd;
Justin TerAvest0bbfeb82011-03-01 15:05:08 -05003252 struct cfq_group *cfqg;
Jens Axboe22e2c502005-06-27 10:55:12 +02003253
Shaohua Li30d7b942011-01-07 08:46:59 +01003254 BUG_ON(cfqq->ref <= 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255
Shaohua Li30d7b942011-01-07 08:46:59 +01003256 cfqq->ref--;
3257 if (cfqq->ref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258 return;
3259
Jens Axboe7b679132008-05-30 12:23:07 +02003260 cfq_log_cfqq(cfqd, cfqq, "put_queue");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 BUG_ON(rb_first(&cfqq->sort_list));
Jens Axboe22e2c502005-06-27 10:55:12 +02003262 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
Vivek Goyalb1c35762009-12-03 12:59:47 -05003263 cfqg = cfqq->cfqg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264
Jens Axboe28f95cbc2007-01-19 12:09:53 +11003265 if (unlikely(cfqd->active_queue == cfqq)) {
Vivek Goyale5ff0822010-04-26 19:25:11 +02003266 __cfq_slice_expired(cfqd, cfqq, 0);
Jens Axboe23e018a2009-10-05 08:52:35 +02003267 cfq_schedule_dispatch(cfqd);
Jens Axboe28f95cbc2007-01-19 12:09:53 +11003268 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003269
Vivek Goyalf04a6422009-12-03 12:59:40 -05003270 BUG_ON(cfq_cfqq_on_rr(cfqq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 kmem_cache_free(cfq_pool, cfqq);
Tejun Heoeb7d8c072012-03-23 14:02:53 +01003272 cfqg_put(cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273}
3274
Shaohua Lid02a2c02010-05-25 10:16:53 +02003275static void cfq_put_cooperator(struct cfq_queue *cfqq)
Jens Axboe89850f72006-07-22 16:48:31 +02003276{
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003277 struct cfq_queue *__cfqq, *next;
3278
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003279 /*
3280 * If this queue was scheduled to merge with another queue, be
3281 * sure to drop the reference taken on that queue (and others in
3282 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3283 */
3284 __cfqq = cfqq->new_cfqq;
3285 while (__cfqq) {
3286 if (__cfqq == cfqq) {
3287 WARN(1, "cfqq->new_cfqq loop detected\n");
3288 break;
3289 }
3290 next = __cfqq->new_cfqq;
3291 cfq_put_queue(__cfqq);
3292 __cfqq = next;
3293 }
Shaohua Lid02a2c02010-05-25 10:16:53 +02003294}
3295
3296static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3297{
3298 if (unlikely(cfqq == cfqd->active_queue)) {
3299 __cfq_slice_expired(cfqd, cfqq, 0);
3300 cfq_schedule_dispatch(cfqd);
3301 }
3302
3303 cfq_put_cooperator(cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003304
Jens Axboe89850f72006-07-22 16:48:31 +02003305 cfq_put_queue(cfqq);
3306}
3307
Tejun Heo9b84cac2011-12-14 00:33:42 +01003308static void cfq_init_icq(struct io_cq *icq)
3309{
3310 struct cfq_io_cq *cic = icq_to_cic(icq);
3311
3312 cic->ttime.last_end_request = jiffies;
3313}
3314
Tejun Heoc5869802011-12-14 00:33:41 +01003315static void cfq_exit_icq(struct io_cq *icq)
Jens Axboe89850f72006-07-22 16:48:31 +02003316{
Tejun Heoc5869802011-12-14 00:33:41 +01003317 struct cfq_io_cq *cic = icq_to_cic(icq);
Tejun Heo283287a2011-12-14 00:33:38 +01003318 struct cfq_data *cfqd = cic_to_cfqd(cic);
Fabio Checconi4faa3c82008-04-10 08:28:01 +02003319
Jens Axboeff6657c2009-04-08 10:58:57 +02003320 if (cic->cfqq[BLK_RW_ASYNC]) {
3321 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3322 cic->cfqq[BLK_RW_ASYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02003323 }
3324
Jens Axboeff6657c2009-04-08 10:58:57 +02003325 if (cic->cfqq[BLK_RW_SYNC]) {
3326 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3327 cic->cfqq[BLK_RW_SYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02003328 }
Jens Axboe89850f72006-07-22 16:48:31 +02003329}
3330
Tejun Heoabede6d2012-03-19 15:10:57 -07003331static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02003332{
3333 struct task_struct *tsk = current;
3334 int ioprio_class;
3335
Jens Axboe3b181522005-06-27 10:56:24 +02003336 if (!cfq_cfqq_prio_changed(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02003337 return;
3338
Tejun Heo598971b2012-03-19 15:10:58 -07003339 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02003340 switch (ioprio_class) {
Jens Axboefe094d92008-01-31 13:08:54 +01003341 default:
3342 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3343 case IOPRIO_CLASS_NONE:
3344 /*
Jens Axboe6d63c272008-05-07 09:51:23 +02003345 * no prio set, inherit CPU scheduling settings
Jens Axboefe094d92008-01-31 13:08:54 +01003346 */
3347 cfqq->ioprio = task_nice_ioprio(tsk);
Jens Axboe6d63c272008-05-07 09:51:23 +02003348 cfqq->ioprio_class = task_nice_ioclass(tsk);
Jens Axboefe094d92008-01-31 13:08:54 +01003349 break;
3350 case IOPRIO_CLASS_RT:
Tejun Heo598971b2012-03-19 15:10:58 -07003351 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003352 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3353 break;
3354 case IOPRIO_CLASS_BE:
Tejun Heo598971b2012-03-19 15:10:58 -07003355 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003356 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3357 break;
3358 case IOPRIO_CLASS_IDLE:
3359 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3360 cfqq->ioprio = 7;
3361 cfq_clear_cfqq_idle_window(cfqq);
3362 break;
Jens Axboe22e2c502005-06-27 10:55:12 +02003363 }
3364
3365 /*
3366 * keep track of original prio settings in case we have to temporarily
3367 * elevate the priority of this queue
3368 */
3369 cfqq->org_ioprio = cfqq->ioprio;
Jens Axboe3b181522005-06-27 10:56:24 +02003370 cfq_clear_cfqq_prio_changed(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003371}
3372
Tejun Heo598971b2012-03-19 15:10:58 -07003373static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
Jens Axboe22e2c502005-06-27 10:55:12 +02003374{
Tejun Heo598971b2012-03-19 15:10:58 -07003375 int ioprio = cic->icq.ioc->ioprio;
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003376 struct cfq_data *cfqd = cic_to_cfqd(cic);
Al Viro478a82b2006-03-18 13:25:24 -05003377 struct cfq_queue *cfqq;
Jens Axboe35e60772006-06-14 09:10:45 +02003378
Tejun Heo598971b2012-03-19 15:10:58 -07003379 /*
3380 * Check whether ioprio has changed. The condition may trigger
3381 * spuriously on a newly created cic but there's no harm.
3382 */
3383 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
Jens Axboecaaa5f92006-06-16 11:23:00 +02003384 return;
3385
Jens Axboeff6657c2009-04-08 10:58:57 +02003386 cfqq = cic->cfqq[BLK_RW_ASYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02003387 if (cfqq) {
3388 struct cfq_queue *new_cfqq;
Tejun Heoabede6d2012-03-19 15:10:57 -07003389 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3390 GFP_ATOMIC);
Jens Axboecaaa5f92006-06-16 11:23:00 +02003391 if (new_cfqq) {
Jens Axboeff6657c2009-04-08 10:58:57 +02003392 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
Jens Axboecaaa5f92006-06-16 11:23:00 +02003393 cfq_put_queue(cfqq);
3394 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003395 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02003396
Jens Axboeff6657c2009-04-08 10:58:57 +02003397 cfqq = cic->cfqq[BLK_RW_SYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02003398 if (cfqq)
3399 cfq_mark_cfqq_prio_changed(cfqq);
Tejun Heo598971b2012-03-19 15:10:58 -07003400
3401 cic->ioprio = ioprio;
Jens Axboe22e2c502005-06-27 10:55:12 +02003402}
3403
Jens Axboed5036d72009-06-26 10:44:34 +02003404static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboea6151c32009-10-07 20:02:57 +02003405 pid_t pid, bool is_sync)
Jens Axboed5036d72009-06-26 10:44:34 +02003406{
3407 RB_CLEAR_NODE(&cfqq->rb_node);
3408 RB_CLEAR_NODE(&cfqq->p_node);
3409 INIT_LIST_HEAD(&cfqq->fifo);
3410
Shaohua Li30d7b942011-01-07 08:46:59 +01003411 cfqq->ref = 0;
Jens Axboed5036d72009-06-26 10:44:34 +02003412 cfqq->cfqd = cfqd;
3413
3414 cfq_mark_cfqq_prio_changed(cfqq);
3415
3416 if (is_sync) {
3417 if (!cfq_class_idle(cfqq))
3418 cfq_mark_cfqq_idle_window(cfqq);
3419 cfq_mark_cfqq_sync(cfqq);
3420 }
3421 cfqq->pid = pid;
3422}
3423
Vivek Goyal246103332009-12-03 12:59:51 -05003424#ifdef CONFIG_CFQ_GROUP_IOSCHED
Tejun Heo598971b2012-03-19 15:10:58 -07003425static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
Vivek Goyal246103332009-12-03 12:59:51 -05003426{
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003427 struct cfq_data *cfqd = cic_to_cfqd(cic);
Tejun Heo598971b2012-03-19 15:10:58 -07003428 struct cfq_queue *sync_cfqq;
3429 uint64_t id;
Vivek Goyal246103332009-12-03 12:59:51 -05003430
Tejun Heo598971b2012-03-19 15:10:58 -07003431 rcu_read_lock();
Tejun Heo3c798392012-04-16 13:57:25 -07003432 id = bio_blkcg(bio)->id;
Tejun Heo598971b2012-03-19 15:10:58 -07003433 rcu_read_unlock();
3434
3435 /*
3436 * Check whether blkcg has changed. The condition may trigger
3437 * spuriously on a newly created cic but there's no harm.
3438 */
3439 if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
Vivek Goyal246103332009-12-03 12:59:51 -05003440 return;
3441
Tejun Heo598971b2012-03-19 15:10:58 -07003442 sync_cfqq = cic_to_cfqq(cic, 1);
Vivek Goyal246103332009-12-03 12:59:51 -05003443 if (sync_cfqq) {
3444 /*
3445 * Drop reference to sync queue. A new sync queue will be
3446 * assigned in new group upon arrival of a fresh request.
3447 */
3448 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3449 cic_set_cfqq(cic, NULL, 1);
3450 cfq_put_queue(sync_cfqq);
3451 }
Tejun Heo598971b2012-03-19 15:10:58 -07003452
3453 cic->blkcg_id = id;
Vivek Goyal246103332009-12-03 12:59:51 -05003454}
Tejun Heo598971b2012-03-19 15:10:58 -07003455#else
3456static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
Vivek Goyal246103332009-12-03 12:59:51 -05003457#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3458
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459static struct cfq_queue *
Tejun Heoabede6d2012-03-19 15:10:57 -07003460cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3461 struct bio *bio, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462{
Tejun Heo3c798392012-04-16 13:57:25 -07003463 struct blkcg *blkcg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464 struct cfq_queue *cfqq, *new_cfqq = NULL;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003465 struct cfq_group *cfqg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466
3467retry:
Tejun Heo2a7f1242012-03-05 13:15:01 -08003468 rcu_read_lock();
3469
Tejun Heo3c798392012-04-16 13:57:25 -07003470 blkcg = bio_blkcg(bio);
Tejun Heocd1604f2012-03-05 13:15:06 -08003471 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
Vasily Tarasov91fac312007-04-25 12:29:51 +02003472 cfqq = cic_to_cfqq(cic, is_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473
Jens Axboe6118b702009-06-30 09:34:12 +02003474 /*
3475 * Always try a new alloc if we fell back to the OOM cfqq
3476 * originally, since it should just be a temporary situation.
3477 */
3478 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3479 cfqq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480 if (new_cfqq) {
3481 cfqq = new_cfqq;
3482 new_cfqq = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02003483 } else if (gfp_mask & __GFP_WAIT) {
Tejun Heo2a7f1242012-03-05 13:15:01 -08003484 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 spin_unlock_irq(cfqd->queue->queue_lock);
Christoph Lameter94f60302007-07-17 04:03:29 -07003486 new_cfqq = kmem_cache_alloc_node(cfq_pool,
Jens Axboe6118b702009-06-30 09:34:12 +02003487 gfp_mask | __GFP_ZERO,
Christoph Lameter94f60302007-07-17 04:03:29 -07003488 cfqd->queue->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489 spin_lock_irq(cfqd->queue->queue_lock);
Jens Axboe6118b702009-06-30 09:34:12 +02003490 if (new_cfqq)
3491 goto retry;
Jens Axboe22e2c502005-06-27 10:55:12 +02003492 } else {
Christoph Lameter94f60302007-07-17 04:03:29 -07003493 cfqq = kmem_cache_alloc_node(cfq_pool,
3494 gfp_mask | __GFP_ZERO,
3495 cfqd->queue->node);
Kiyoshi Ueda db3b5842005-06-17 16:15:10 +02003496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497
Jens Axboe6118b702009-06-30 09:34:12 +02003498 if (cfqq) {
3499 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
Tejun Heoabede6d2012-03-19 15:10:57 -07003500 cfq_init_prio_data(cfqq, cic);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003501 cfq_link_cfqq_cfqg(cfqq, cfqg);
Jens Axboe6118b702009-06-30 09:34:12 +02003502 cfq_log_cfqq(cfqd, cfqq, "alloced");
3503 } else
3504 cfqq = &cfqd->oom_cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 }
3506
3507 if (new_cfqq)
3508 kmem_cache_free(cfq_pool, new_cfqq);
3509
Tejun Heo2a7f1242012-03-05 13:15:01 -08003510 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 return cfqq;
3512}
3513
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003514static struct cfq_queue **
3515cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3516{
Jens Axboefe094d92008-01-31 13:08:54 +01003517 switch (ioprio_class) {
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003518 case IOPRIO_CLASS_RT:
3519 return &cfqd->async_cfqq[0][ioprio];
Tejun Heo598971b2012-03-19 15:10:58 -07003520 case IOPRIO_CLASS_NONE:
3521 ioprio = IOPRIO_NORM;
3522 /* fall through */
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003523 case IOPRIO_CLASS_BE:
3524 return &cfqd->async_cfqq[1][ioprio];
3525 case IOPRIO_CLASS_IDLE:
3526 return &cfqd->async_idle_cfqq;
3527 default:
3528 BUG();
3529 }
3530}
3531
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio, gfp_t gfp_mask)
{
	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

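/*
 * Fold one think-time sample (the gap between the last request
 * completing and this one arriving) into a fixed-point running average.
 * Samples and totals carry a 256x scale and decay with weight 7/8, so
 * ttime_mean tracks roughly the last eight gaps, in jiffies.  The
 * sample is clamped to twice the idle window, since gaps longer than
 * that would never be worth idling for anyway.
 */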
static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

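/*
 * Track how seeky this queue is.  seek_history is a 32-bit shift
 * register: each request shifts in a 1 if it qualifies as seeky (more
 * than CFQQ_SEEK_THR sectors from the previous request, or a small
 * request on a non-rotational queue) and a 0 otherwise.  CFQQ_SEEKY()
 * then simply counts the set bits.
 */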
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Returns
 * false for no (or if we aren't sure); returning true causes a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * If the workload type has changed, don't save the slice;
	 * otherwise the preempt would not happen.
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_wl_slice = 0;

	/*
	 * Put the new queue at the front of the current list, so we know
	 * that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending; don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfqg_stats_update_idle_time(cfqq->cfqg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher
		 * priority, or if this new queue is RT and the current
		 * one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

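/*
 * Elevator add_req hook: set up the request's priority data and fifo
 * expiry, add it to the queue's fifo and sort tree, update blkcg stats,
 * then let cfq_rq_enqueued() decide whether to kick off dispatch or
 * preempt the active queue.
 */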
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

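/*
 * Decide whether an empty queue whose slice is ending should be held
 * "wait busy" instead of being expired outright: only if it is the
 * lone queue in its group, its think time is small, and the next
 * request is expected before the slice ends (or the slice is already
 * effectively over), so the group doesn't lose its share just because
 * the slice lapsed a moment before the next request arrived.
 */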
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think time is less than a jiffy, then ttime_mean=0 and the
	 * check above will not be true. It might happen that the slice
	 * has not expired yet but will expire soon (4-5 ns) during
	 * select_queue(). To cover the case where think time is less
	 * than a jiffy, mark the queue wait busy if only 1 jiffy is
	 * left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

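/*
 * Elevator completion hook: update driver-depth accounting and
 * think-time state for the cic, service tree and group, then, if this
 * was the active queue, set its initial slice, consider waiting busy,
 * expire it, or arm the idle timer as appropriate.
 */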
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *st;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			st = cfqq->service_tree;
		else
			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
				    cfqq_type(cfqq));

		st->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before
		 * we expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

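/*
 * If we are idling in wait of a request from this queue, the
 * allocation must be allowed through (ELV_MQUEUE_MUST) so the awaited
 * request can actually arrive; otherwise just report that it may queue.
 */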
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * So just look up a possibly existing queue, or return 'may queue'
	 * if that fails.
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

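/*
 * Fold cfqq into the close cooperator it was scheduled to merge with:
 * repoint the cic's sync queue at new_cfqq, mark that queue as the
 * product of a cooperative merge, and drop this process's reference to
 * the old queue.
 */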
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}
/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

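/*
 * Work handler behind cfq_schedule_dispatch(): re-run the request
 * queue from process context, with the queue lock held.
 */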
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke the request handler if there
		 * are other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

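/*
 * Drop the references that cfqd holds on the cached shared async
 * queues (one per RT and BE priority level, plus the single idle
 * queue), taken when they were pinned in cfq_get_queue().
 */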
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

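/*
 * Elevator init hook: allocate and populate cfqd, activate the blkcg
 * policy (or allocate a bare root group without CFQ_GROUP_IOSCHED),
 * seed the service trees and the permanently pinned oom_cfqq fallback,
 * and copy in the default tunables.
 */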
static int cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	struct blkcg_gq *blkg __maybe_unused;
	int i, ret;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

	cfqd->queue = q;
	q->elevator->elevator_data = cfqd;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
	if (ret)
		goto out_free;

	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (!cfqd->root_group)
		goto out_free;

	cfq_init_cfqg_base(cfqd->root_group);
#endif
	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better to be safe in case someone
	 * decides to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in
	 * the last second, in order to have larger depth for async
	 * operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;

out_free:
	kfree(cfqd);
	return ret;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

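/*
 * SHOW_FUNCTION/STORE_FUNCTION below stamp out one sysfs handler per
 * tunable; the __CONV flag selects conversion between jiffies
 * (internal) and milliseconds (user visible), and stores clamp the
 * written value to [MIN, MAX].
 */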
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_merge_fn = cfq_allow_merge,
		.elevator_bio_merged_fn = cfq_bio_merged,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_init_icq_fn = cfq_init_icq,
		.elevator_exit_icq_fn = cfq_exit_icq,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
	},
	.icq_size = sizeof(struct cfq_io_cq),
	.icq_align = __alignof__(struct cfq_io_cq),
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.pd_size = sizeof(struct cfq_group),
	.cftypes = cfq_blkcg_files,

	.pd_init_fn = cfq_pd_init,
	.pd_offline_fn = cfq_pd_offline,
	.pd_reset_stats_fn = cfq_pd_reset_stats,
};
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");