/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

static struct blkcg_policy blkcg_policy_cfq __maybe_unused;

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
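
/*
 * Note: seek_history is a 32-bit sliding window, maintained later in this
 * file with one bit per completed request (1 == the request was "seeky").
 * CFQQ_SEEKY() therefore flags a queue as seeky once more than 1/8 of its
 * last 32 requests (i.e. 5 or more) exceeded the seek threshold.
 */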

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	unsigned total_weight;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this queue */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, on a dedicated service_tree_idle tree.
 */
enum wl_prio_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class, and the slot for IDLE class remains
	 * unused. This is primarily done to avoid confusion and a gcc
	 * warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided into subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For the
	 * IDLE class there is no subclassification and all the cfq queues
	 * go on a single tree, service_tree_idle.
	 * Counts are embedded in the cfq_rb_root.
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_workload_slice;
	enum wl_type_t saved_workload;
	enum wl_prio_t saved_serving_prio;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;
};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_id;	/* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_prio_t serving_prio;
	enum wl_type_t serving_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
					    enum wl_prio_t prio,
					    enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (prio == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[prio][type];
}
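
/*
 * Example: service_tree_for(cfqg, RT_WORKLOAD, SYNC_WORKLOAD) resolves to
 * &cfqg->service_trees[1][2], while any IDLE-class lookup collapses onto
 * the single service_tree_idle regardless of the workload type passed in.
 */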

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
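
/*
 * Each expansion below generates three helpers; e.g. CFQ_CFQQ_FNS(on_rr)
 * yields cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and the test
 * cfq_cfqq_on_rr(), all operating on the matching CFQ_CFQQ_FLAG_* bit.
 */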

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return blkg_to_pdata(blkg, &blkcg_policy_cfq);
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pdata_to_blkg(cfqg);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
	struct cfqg_stats *stats = &cfqg->stats;

	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg) { return NULL; }
static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j] : NULL) \

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}
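
/*
 * In other words: a queue or group whose mean think time (tracked in
 * cfq_ttime) already exceeds the idle slice we would grant it is not
 * worth idling for; waiting would cost more than dispatching elsewhere.
 */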

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is an NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most cases unless we drive shallower queue depths, which
	 * then becomes a performance bottleneck. In such cases switch to
	 * providing fairness in terms of number of IOs instead.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}
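
/*
 * Taken together, cfqq_prio() and cfqq_type() select the service tree a
 * queue lives on: e.g. a BE-class sequential reader with idling enabled
 * goes on service_trees[BE_WORKLOAD][SYNC_WORKLOAD], while async queues
 * (e.g. buffered writeback) land on the matching ASYNC_WORKLOAD tree.
 */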

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * Schedule a run of the queue if there are requests pending and no one
 * in the driver will restart queueing.
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * CFQ_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}
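
/*
 * The charge added to a group's vdisktime is thus inversely proportional
 * to its weight: for the same slice, a group with twice the default
 * weight accrues half the vdisktime and so gets scheduled twice as often.
 */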

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
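
/*
 * The signed-delta comparison above makes both helpers safe against u64
 * wraparound: (s64)(a - b) orders a and b correctly as long as they are
 * within 2^63 of each other, the same trick CFS uses for vruntime.
 */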

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * Get the averaged number of queues of RT/BE priority. The average is
 * updated with a formula that gives more weight to higher numbers, so it
 * quickly follows sudden increases and decreases slowly.
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
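
/*
 * With cfq_hist_divisor == 4 this is a rounded (3*max + min) / 4 blend,
 * where the larger of (old average, current busy count) gets the 3x
 * weight: an old average of 2 with a burst to 10 busy queues jumps to
 * (3*10 + 2 + 2) / 4 = 8 at once, while decaying from 10 back toward 2
 * only drops to 8 on the first update.
 */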

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfq_target_latency * cfqg->weight / st->total_weight;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
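
/*
 * Worked example (assumed numbers): with a 100ms base slice, a 300ms
 * group slice and an average of 6 interested sync queues at a 100ms sync
 * slice, the expected latency is 600ms > 300ms, so the slice is
 * compressed to 100 * 300 / 600 = 50ms (bounded from below by low_slice).
 */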

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind the head
		 * (--> only *one* back seek required),
		 * since a back seek takes more time than a forward one.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
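
/*
 * Distance example with the defaults (cfq_back_max = 16 MiB worth of
 * sectors, cfq_back_penalty = 2): from head position `last`, a request
 * 1000 sectors ahead costs 1000, one 1000 sectors behind costs 2000, and
 * anything further behind than back_max "wraps" and is only chosen when
 * the alternative wraps too.
 */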

/*
 * The below is the leftmost-cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}
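
/*
 * Standard rbtree insertion keyed by vdisktime offset from min_vdisktime.
 * `left` stays set only while we keep descending left, i.e. exactly when
 * the new node becomes the leftmost one, so the cached st->left pointer
 * can be updated without an extra rb_first() walk.
 */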

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}
}
1179
1180static void
1181cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1182{
1183 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1184
1185 cfq_update_group_weight(cfqg);
1186 __cfq_group_service_tree_add(st, cfqg);
1187 st->total_weight += cfqg->weight;
1188}
1189
1190static void
1191cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001192{
1193 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1194 struct cfq_group *__cfqg;
1195 struct rb_node *n;
1196
1197 cfqg->nr_cfqq++;
Gui Jianfeng760701b2010-11-30 20:52:47 +01001198 if (!RB_EMPTY_NODE(&cfqg->rb_node))
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001199 return;
1200
1201 /*
1202 * Currently put the group at the end. Later implement something
1203 * so that groups get lesser vtime based on their weights, so that
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001204 * if group does not loose all if it was not continuously backlogged.
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001205 */
1206 n = rb_last(&st->rb);
1207 if (n) {
1208 __cfqg = rb_entry_cfqg(n);
1209 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1210 } else
1211 cfqg->vdisktime = st->min_vdisktime;
Justin TerAvest8184f932011-03-17 16:12:36 +01001212 cfq_group_service_tree_add(st, cfqg);
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05001213}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_workload_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and the group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}
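
/*
 * Worked example (illustrative jiffies): a queue that started
 * dispatching at t=100, had its 100-jiffy slice begin at t=110 and is
 * expired at t=230 reports slice_used = 100 (capped at the allocated
 * slice); the 20 jiffies of overrun plus the 10 jiffies of pre-slice
 * dispatching are returned via *unaccounted_time.
 */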

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_workload_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_workload = cfqd->serving_type;
		cfqg->saved_serving_prio = cfqd->serving_prio;
	} else
		cfqg->saved_workload_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
}
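
/*
 * Charging summary (illustrative): in time mode a group is charged the
 * wall-clock slice the queue used; in iops mode it is charged the
 * number of requests dispatched, which is fairer on seeky media; an
 * async queue with no sync competition is charged its full allocated
 * slice. cfq_scale_slice() then scales the charge inversely with the
 * group weight, so e.g. a weight-1000 group accrues vdisktime at half
 * the rate of a weight-500 one for the same service.
 */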

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfq_pd_init(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->cfq_weight;
}

/*
 * Search for the cfq group the current task belongs to. The
 * request_queue lock must be held.
 */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)
{
	struct request_queue *q = cfqd->queue;
	struct cfq_group *cfqg = NULL;

	/* avoid lookup for the common case where there's no blkcg */
	if (blkcg == &blkcg_root) {
		cfqg = cfqd->root_group;
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);
		if (!IS_ERR(blkg))
			cfqg = blkg_to_cfqg(blkg);
	}

	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqg_get(cfqg);
}

static u64 cfqg_prfill_weight_device(struct seq_file *sf, void *pdata, int off)
{
	struct cfq_group *cfqg = pdata;

	if (!cfqg->dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pdata, cfqg->dev_weight);
}

static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				    struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
			  false);
	return 0;
}

static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
	return 0;
}

static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				  const char *buf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct cfq_group *cfqg;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	cfqg = blkg_to_cfqg(ctx.blkg);
	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
		cfqg->dev_weight = ctx.v;
		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}
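
/*
 * Illustrative usage from userspace (device numbers are examples; the
 * file is expected to show up as blkio.weight_device under the blkio
 * cgroup hierarchy):
 *   # echo "8:16 300" > blkio.weight_device   - per-device weight 300
 *   # echo "8:16 0" > blkio.weight_device     - drop the override and
 *                                               fall back to blkio.weight
 * Non-zero values must lie within [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX].
 */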

static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkcg_gq *blkg;
	struct hlist_node *n;

	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);
	blkcg->cfq_weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

		if (cfqg && !cfqg->dev_weight)
			cfqg->new_weight = blkcg->cfq_weight;
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
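
/*
 * Illustrative usage (value is an example): "echo 500 > blkio.weight"
 * updates the blkcg-wide default for every group without a per-device
 * override. Note that the new value is only latched into ->new_weight
 * here; cfq_update_group_weight() applies it the next time the group
 * is (re)added to the service tree, i.e. strictly while off-tree.
 */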

static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
			  cft->private, false);
	return 0;
}

static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
			  cft->private, true);
	return 0;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, void *pdata, int off)
{
	struct cfq_group *cfqg = pdata;
	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pdata, v);
	return 0;
}

/* print avg_queue_size */
static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
			  &blkcg_policy_cfq, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

static struct cftype cfq_blkcg_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = cfqg_print_weight_device,
		.write_string = cfqg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = cfq_print_weight,
		.write_u64 = cfq_set_weight,
	},
	{
		.name = "time",
		.private = offsetof(struct cfq_group, stats.time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "sectors",
		.private = offsetof(struct cfq_group, stats.sectors),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "io_service_bytes",
		.private = offsetof(struct cfq_group, stats.service_bytes),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_serviced",
		.private = offsetof(struct cfq_group, stats.serviced),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_service_time",
		.private = offsetof(struct cfq_group, stats.service_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = offsetof(struct cfq_group, stats.wait_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = offsetof(struct cfq_group, stats.merged),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = offsetof(struct cfq_group, stats.queued),
		.read_seq_string = cfqg_print_rwstat,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = cfqg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = offsetof(struct cfq_group, stats.group_wait_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "idle_time",
		.private = offsetof(struct cfq_group, stats.idle_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "empty_time",
		.private = offsetof(struct cfq_group, stats.empty_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "dequeue",
		.private = offsetof(struct cfq_group, stats.dequeue),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = offsetof(struct cfq_group, stats.unaccounted_time),
		.read_seq_string = cfqg_print_stat,
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};
#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)
{
	return cfqd->root_group;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees hold all pending cfq_queue's that have
 * requests waiting to be processed. They are sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, which represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
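
/*
 * Worked example (illustrative jiffies): at jiffies=1000, a sync queue
 * that overran its previous slice by 30 jiffies (slice_resid == -30)
 * gets rb_key = cfq_slice_offset() + 1000 + 30, landing behind peers
 * that kept within their slices. With add_front set, the key is placed
 * just ahead of the current leftmost queue instead.
 */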

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}
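
/*
 * Note: the prio trees index busy queues by the sector of their next
 * pending request, one tree per original ioprio. If another queue
 * already keys to the same sector, this queue is simply left out of
 * the tree (p_root reset to NULL) rather than handling duplicates;
 * cfqq_close() later walks these trees to find a queue working near
 * cfqd->last_position.
 */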

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
				 rq->cmd_flags);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	if (rq->cmd_flags & REQ_PRIO) {
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfqg_stats_update_idle_time(cfqq->cfqg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance. If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->icq.ioc);
		cfqd->active_cic = NULL;
	}
}
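
/*
 * Residual slice bookkeeping (illustrative): a queue timed out 20
 * jiffies before slice_end keeps slice_resid = +20 and is keyed closer
 * in on its next service tree add; one expired 20 jiffies past
 * slice_end gets slice_resid = -20 and is pushed further out. A queue
 * expired while still slice_new is credited a full scaled slice.
 */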

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
					cfqd->serving_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}
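
/*
 * Here "close" means within CFQQ_CLOSE_THR (8192 sectors, i.e. 4MB
 * assuming 512-byte sectors) of cfqd->last_position, in either
 * direction. So (illustrative) with last_position at sector 100000, a
 * queue whose next request starts at sector 95000 qualifies while one
 * at sector 120000 does not.
 */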

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, e.g.
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and not waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */

static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
			service_tree->count);
	return false;
}
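
/*
 * Decision summary: never idle when cfq_slice_idle is 0 (commonly
 * tuned down for fast storage) or for idle-class queues; idle for
 * queues carrying the idle_window flag unless the device is
 * non-rotational and does queuing; otherwise idle only for the last
 * sync queue left on its service tree, and only if the tree's mean
 * think time is small enough for the wait to pay off.
 */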
2307
Jens Axboe6d048f52007-04-25 12:44:27 +02002308static void cfq_arm_slice_timer(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002309{
Jens Axboe17926692007-01-19 11:59:30 +11002310 struct cfq_queue *cfqq = cfqd->active_queue;
Tejun Heoc5869802011-12-14 00:33:41 +01002311 struct cfq_io_cq *cic;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002312 unsigned long sl, group_idle = 0;
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002313
Jens Axboea68bbdd2008-09-24 13:03:33 +02002314 /*
Jens Axboef7d7b7a2008-09-25 11:37:50 +02002315 * SSD device without seek penalty, disable idling. But only do so
2316 * for devices that support queuing, otherwise we still have a problem
2317 * with sync vs async workloads.
Jens Axboea68bbdd2008-09-24 13:03:33 +02002318 */
Jens Axboef7d7b7a2008-09-25 11:37:50 +02002319 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
Jens Axboea68bbdd2008-09-24 13:03:33 +02002320 return;
2321
Jens Axboedd67d052006-06-21 09:36:18 +02002322 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
Jens Axboe6d048f52007-04-25 12:44:27 +02002323 WARN_ON(cfq_cfqq_slice_new(cfqq));
Jens Axboe22e2c502005-06-27 10:55:12 +02002324
2325 /*
2326 * idle is disabled, either manually or by past process history
2327 */
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002328 if (!cfq_should_idle(cfqd, cfqq)) {
2329 /* no queue idling. Check for group idling */
2330 if (cfqd->cfq_group_idle)
2331 group_idle = cfqd->cfq_group_idle;
2332 else
2333 return;
2334 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002335
Jens Axboe22e2c502005-06-27 10:55:12 +02002336 /*
Corrado Zoccolo8e550632009-11-26 10:02:58 +01002337 * still active requests from this queue, don't idle
Jens Axboe7b679132008-05-30 12:23:07 +02002338 */
Corrado Zoccolo8e550632009-11-26 10:02:58 +01002339 if (cfqq->dispatched)
Jens Axboe7b679132008-05-30 12:23:07 +02002340 return;
2341
2342 /*
Jens Axboe22e2c502005-06-27 10:55:12 +02002343 * task has exited, don't wait
2344 */
Jens Axboe206dc692006-03-28 13:03:44 +02002345 cic = cfqd->active_cic;
Tejun Heof6e8d012012-03-05 13:15:26 -08002346 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
Jens Axboe6d048f52007-04-25 12:44:27 +02002347 return;
2348
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002349 /*
2350 * If our average think time is larger than the remaining time
2351 * slice, then don't idle. This avoids overrunning the allotted
2352 * time slice.
2353 */
Shaohua Li383cd722011-07-12 14:24:35 +02002354 if (sample_valid(cic->ttime.ttime_samples) &&
2355 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
Joe Perchesfd16d262011-06-13 10:42:49 +02002356 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
Shaohua Li383cd722011-07-12 14:24:35 +02002357 cic->ttime.ttime_mean);
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002358 return;
Divyesh Shahb1ffe732010-03-25 15:45:03 +01002359 }
Corrado Zoccolo355b6592009-10-08 08:43:32 +02002360
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002361 /* There are other queues in the group, don't do group idle */
2362 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2363 return;
2364
Jens Axboe3b181522005-06-27 10:56:24 +02002365 cfq_mark_cfqq_wait_request(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002366
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002367 if (group_idle)
2368 sl = cfqd->cfq_group_idle;
2369 else
2370 sl = cfqd->cfq_slice_idle;
Jens Axboe206dc692006-03-28 13:03:44 +02002371
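	/*
	 * Arm the idle timer: if no new request arrives within sl
	 * jiffies, the timer handler will expire the slice and move on.
	 */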
Jens Axboe7b14e3b2006-02-28 09:35:11 +01002372 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
Tejun Heo155fead2012-04-01 14:38:44 -07002373 cfqg_stats_set_start_idle_time(cfqq->cfqg);
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002374 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2375 group_idle ? 1 : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376}
2377
Jens Axboe498d3aa22007-04-26 12:54:48 +02002378/*
2379 * Move request from internal lists to the request queue dispatch list.
2380 */
Jens Axboe165125e2007-07-24 09:28:11 +02002381static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382{
Jens Axboe3ed9a292007-04-23 08:33:33 +02002383 struct cfq_data *cfqd = q->elevator->elevator_data;
Jens Axboe5e705372006-07-13 12:39:25 +02002384 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002385
Jens Axboe7b679132008-05-30 12:23:07 +02002386 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2387
Jeff Moyer06d21882009-09-11 17:08:59 +02002388 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
Jens Axboe5380a102006-07-13 12:37:56 +02002389 cfq_remove_request(rq);
Jens Axboe6d048f52007-04-25 12:44:27 +02002390 cfqq->dispatched++;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002391 (RQ_CFQG(rq))->dispatched++;
Jens Axboe5380a102006-07-13 12:37:56 +02002392 elv_dispatch_sort(q, rq);
Jens Axboe3ed9a292007-04-23 08:33:33 +02002393
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002394 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
Vivek Goyalc4e78932010-08-23 12:25:03 +02002395 cfqq->nr_sectors += blk_rq_sectors(rq);
Tejun Heo155fead2012-04-01 14:38:44 -07002396 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397}
2398
2399/*
2400 * return expired entry, or NULL to just start from scratch in rbtree
2401 */
Jens Axboefebffd62008-01-28 13:19:43 +01002402static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403{
Jens Axboe30996f42009-10-05 11:03:39 +02002404 struct request *rq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405
Jens Axboe3b181522005-06-27 10:56:24 +02002406 if (cfq_cfqq_fifo_expire(cfqq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 return NULL;
Jens Axboecb887412007-01-19 12:01:16 +11002408
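	/*
	 * Only one FIFO check per slice: the flag set below is cleared
	 * again when the queue is next made active.
	 */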
2409 cfq_mark_cfqq_fifo_expire(cfqq);
2410
Jens Axboe89850f72006-07-22 16:48:31 +02002411 if (list_empty(&cfqq->fifo))
2412 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413
Jens Axboe89850f72006-07-22 16:48:31 +02002414 rq = rq_entry_fifo(cfqq->fifo.next);
Jens Axboe30996f42009-10-05 11:03:39 +02002415 if (time_before(jiffies, rq_fifo_time(rq)))
Jens Axboe7b679132008-05-30 12:23:07 +02002416 rq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417
Jens Axboe30996f42009-10-05 11:03:39 +02002418 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
Jens Axboe6d048f52007-04-25 12:44:27 +02002419 return rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420}
2421
Jens Axboe22e2c502005-06-27 10:55:12 +02002422static inline int
2423cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2424{
2425 const int base_rq = cfqd->cfq_slice_async_rq;
2426
2427 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2428
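	/*
	 * e.g. with the default cfq_slice_async_rq = 2 and ioprio 4:
	 * 2 * 2 * (8 - 4) = 16 requests per slice (IOPRIO_BE_NR == 8).
	 */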
Namhyung Kimb9f8ce02011-05-24 10:23:21 +02002429 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02002430}
2431
2432/*
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002433 * Must be called with the queue_lock held.
2434 */
2435static int cfqq_process_refs(struct cfq_queue *cfqq)
2436{
2437 int process_refs, io_refs;
2438
2439 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
Shaohua Li30d7b942011-01-07 08:46:59 +01002440 process_refs = cfqq->ref - io_refs;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002441 BUG_ON(process_refs < 0);
2442 return process_refs;
2443}
2444
2445static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2446{
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002447 int process_refs, new_process_refs;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002448 struct cfq_queue *__cfqq;
2449
Jeff Moyerc10b61f2010-06-17 10:19:11 -04002450 /*
2451 * If there are no process references on the new_cfqq, then it is
2452 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2453 * chain may have dropped their last reference (not just their
2454 * last process reference).
2455 */
2456 if (!cfqq_process_refs(new_cfqq))
2457 return;
2458
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002459 /* Avoid a circular list and skip interim queue merges */
2460 while ((__cfqq = new_cfqq->new_cfqq)) {
2461 if (__cfqq == cfqq)
2462 return;
2463 new_cfqq = __cfqq;
2464 }
2465
2466 process_refs = cfqq_process_refs(cfqq);
Jeff Moyerc10b61f2010-06-17 10:19:11 -04002467 new_process_refs = cfqq_process_refs(new_cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002468 /*
2469 * If the process for the cfqq has gone away, there is no
2470 * sense in merging the queues.
2471 */
Jeff Moyerc10b61f2010-06-17 10:19:11 -04002472 if (process_refs == 0 || new_process_refs == 0)
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002473 return;
2474
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002475 /*
2476 * Merge in the direction of the lesser amount of work.
2477 */
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002478 if (new_process_refs >= process_refs) {
2479 cfqq->new_cfqq = new_cfqq;
Shaohua Li30d7b942011-01-07 08:46:59 +01002480 new_cfqq->ref += process_refs;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002481 } else {
2482 new_cfqq->new_cfqq = cfqq;
Shaohua Li30d7b942011-01-07 08:46:59 +01002483 cfqq->ref += new_process_refs;
Jeff Moyere6c5bc72009-10-23 17:14:52 -04002484 }
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002485}
2486
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002487static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
Vivek Goyal65b32a52009-12-16 17:52:59 -05002488 struct cfq_group *cfqg, enum wl_prio_t prio)
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002489{
2490 struct cfq_queue *queue;
2491 int i;
2492 bool key_valid = false;
2493 unsigned long lowest_key = 0;
2494 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2495
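	/*
	 * Scan the ASYNC, SYNC_NOIDLE and SYNC trees of this prio class
	 * and pick the type whose first queue has the earliest rb_key.
	 */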
Vivek Goyal65b32a52009-12-16 17:52:59 -05002496 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2497 /* select the one with lowest rb_key */
2498 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002499 if (queue &&
2500 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2501 lowest_key = queue->rb_key;
2502 cur_best = i;
2503 key_valid = true;
2504 }
2505 }
2506
2507 return cur_best;
2508}
2509
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002510static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002511{
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002512 unsigned slice;
2513 unsigned count;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002514 struct cfq_rb_root *st;
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002515 unsigned group_slice;
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01002516 enum wl_prio_t original_prio = cfqd->serving_prio;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002517
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002518 /* Choose next priority. RT > BE > IDLE */
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002519 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002520 cfqd->serving_prio = RT_WORKLOAD;
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002521 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002522 cfqd->serving_prio = BE_WORKLOAD;
2523 else {
2524 cfqd->serving_prio = IDLE_WORKLOAD;
2525 cfqd->workload_expires = jiffies + 1;
2526 return;
2527 }
2528
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01002529 if (original_prio != cfqd->serving_prio)
2530 goto new_workload;
2531
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002532 /*
2533	 * For RT and BE, we also have to choose the type
2534	 * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
2535	 * expiration time
2536 */
Vivek Goyal65b32a52009-12-16 17:52:59 -05002537 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002538 count = st->count;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002539
2540 /*
Vivek Goyal65b32a52009-12-16 17:52:59 -05002541 * check workload expiration, and that we still have other queues ready
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002542 */
Vivek Goyal65b32a52009-12-16 17:52:59 -05002543 if (count && !time_after(jiffies, cfqd->workload_expires))
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002544 return;
2545
Shaohua Li writese4ea0c12010-12-13 14:32:22 +01002546new_workload:
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002547 /* otherwise select new workload type */
2548 cfqd->serving_type =
Vivek Goyal65b32a52009-12-16 17:52:59 -05002549 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2550 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002551 count = st->count;
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002552
2553 /*
2554 * the workload slice is computed as a fraction of target latency
2555 * proportional to the number of queues in that workload, over
2556 * all the queues in the same priority class
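	 * (e.g. a 300 ms group slice where this workload holds 2 of the
	 * class's 4 busy queues gives slice = 300 * 2 / 4 = 150 ms)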
2557 */
Vivek Goyal58ff82f2009-12-03 12:59:44 -05002558 group_slice = cfq_group_slice(cfqd, cfqg);
2559
2560 slice = group_slice * count /
2561 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2562 cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002563
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05002564 if (cfqd->serving_type == ASYNC_WORKLOAD) {
2565 unsigned int tmp;
2566
2567 /*
2568 * Async queues are currently system wide. Just taking
2569 * proportion of queues with-in same group will lead to higher
2570 * async ratio system wide as generally root group is going
2571 * to have higher weight. A more accurate thing would be to
2572 * calculate system wide asnc/sync ratio.
2573 */
2574 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2575 tmp = tmp/cfqd->busy_queues;
2576 slice = min_t(unsigned, slice, tmp);
2577
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002578 /* async workload slice is scaled down according to
2579 * the sync/async slice ratio. */
2580 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
Vivek Goyalf26bd1f2009-12-03 12:59:54 -05002581 } else
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002582 /* sync workload slice is at least 2 * cfq_slice_idle */
2583 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2584
2585 slice = max_t(unsigned, slice, CFQ_MIN_TT);
Divyesh Shahb1ffe732010-03-25 15:45:03 +01002586 cfq_log(cfqd, "workload slice:%d", slice);
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002587 cfqd->workload_expires = jiffies + slice;
2588}
2589
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002590static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2591{
2592 struct cfq_rb_root *st = &cfqd->grp_service_tree;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05002593 struct cfq_group *cfqg;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002594
2595 if (RB_EMPTY_ROOT(&st->rb))
2596 return NULL;
Vivek Goyal25bc6b02009-12-03 12:59:43 -05002597 cfqg = cfq_rb_first_group(st);
Vivek Goyal25bc6b02009-12-03 12:59:43 -05002598 update_min_vdisktime(st);
2599 return cfqg;
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002600}
2601
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002602static void cfq_choose_cfqg(struct cfq_data *cfqd)
2603{
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002604 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2605
2606 cfqd->serving_group = cfqg;
Vivek Goyaldae739e2009-12-03 12:59:45 -05002607
2608 /* Restore the workload type data */
2609 if (cfqg->saved_workload_slice) {
2610 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2611 cfqd->serving_type = cfqg->saved_workload;
2612 cfqd->serving_prio = cfqg->saved_serving_prio;
Gui Jianfeng66ae2912009-12-15 10:08:45 +01002613 } else
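		/* already in the past: forces choose_service_tree() to pick anew */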
2614 cfqd->workload_expires = jiffies - 1;
2615
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05002616 choose_service_tree(cfqd, cfqg);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002617}
2618
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002619/*
Jens Axboe498d3aa22007-04-26 12:54:48 +02002620 * Select a queue for service. If we have a current active queue,
2621 * check whether to continue servicing it, or retrieve and set a new one.
Jens Axboe22e2c502005-06-27 10:55:12 +02002622 */
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01002623static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
Jens Axboe22e2c502005-06-27 10:55:12 +02002624{
Jens Axboea36e71f2009-04-15 12:15:11 +02002625 struct cfq_queue *cfqq, *new_cfqq = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02002626
2627 cfqq = cfqd->active_queue;
2628 if (!cfqq)
2629 goto new_queue;
2630
Vivek Goyalf04a6422009-12-03 12:59:40 -05002631 if (!cfqd->rq_queued)
2632 return NULL;
Vivek Goyalc244bb52009-12-08 17:52:57 -05002633
2634 /*
2635	 * We were waiting for the group to get backlogged. Expire the queue
2636 */
2637 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2638 goto expire;
2639
Jens Axboe22e2c502005-06-27 10:55:12 +02002640 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02002641 * The active queue has run out of time, expire it and select new.
Jens Axboe22e2c502005-06-27 10:55:12 +02002642 */
Vivek Goyal7667aa02009-12-08 17:52:58 -05002643 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2644 /*
2645 * If slice had not expired at the completion of last request
2646 * we might not have turned on wait_busy flag. Don't expire
2647 * the queue yet. Allow the group to get backlogged.
2648 *
2649 * The very fact that we have used the slice, that means we
2650 * have been idling all along on this queue and it should be
2651 * ok to wait for this request to complete.
2652 */
Vivek Goyal82bbbf22009-12-10 19:25:41 +01002653 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2654 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2655 cfqq = NULL;
Vivek Goyal7667aa02009-12-08 17:52:58 -05002656 goto keep_queue;
Vivek Goyal82bbbf22009-12-10 19:25:41 +01002657 } else
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002658 goto check_group_idle;
Vivek Goyal7667aa02009-12-08 17:52:58 -05002659 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002660
2661 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02002662 * The active queue has requests and isn't expired, allow it to
2663 * dispatch.
Jens Axboe22e2c502005-06-27 10:55:12 +02002664 */
Jens Axboedd67d052006-06-21 09:36:18 +02002665 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
Jens Axboe22e2c502005-06-27 10:55:12 +02002666 goto keep_queue;
Jens Axboe6d048f52007-04-25 12:44:27 +02002667
2668 /*
Jens Axboea36e71f2009-04-15 12:15:11 +02002669 * If another queue has a request waiting within our mean seek
2670 * distance, let it run. The expire code will check for close
2671 * cooperators and put the close queue at the front of the service
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002672 * tree. If possible, merge the expiring queue with the new cfqq.
Jens Axboea36e71f2009-04-15 12:15:11 +02002673 */
Jeff Moyerb3b6d042009-10-23 17:14:51 -04002674 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002675 if (new_cfqq) {
2676 if (!cfqq->new_cfqq)
2677 cfq_setup_merge(cfqq, new_cfqq);
Jens Axboea36e71f2009-04-15 12:15:11 +02002678 goto expire;
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002679 }
Jens Axboea36e71f2009-04-15 12:15:11 +02002680
2681 /*
Jens Axboe6d048f52007-04-25 12:44:27 +02002682 * No requests pending. If the active queue still has requests in
2683 * flight or is idling for a new request, allow either of these
2684 * conditions to happen (or time out) before selecting a new queue.
2685 */
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002686 if (timer_pending(&cfqd->idle_slice_timer)) {
2687 cfqq = NULL;
2688 goto keep_queue;
2689 }
2690
Shaohua Li8e1ac662010-11-08 15:01:04 +01002691 /*
2692 * This is a deep seek queue, but the device is much faster than
2693	 * the queue can drive it; don't idle
2694	 */
2695 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2696 (cfq_cfqq_slice_new(cfqq) ||
2697 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2698 cfq_clear_cfqq_deep(cfqq);
2699 cfq_clear_cfqq_idle_window(cfqq);
2700 }
2701
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02002702 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2703 cfqq = NULL;
2704 goto keep_queue;
2705 }
2706
2707 /*
2708 * If group idle is enabled and there are requests dispatched from
2709 * this group, wait for requests to complete.
2710 */
2711check_group_idle:
Shaohua Li7700fc42011-07-12 14:24:56 +02002712 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2713 cfqq->cfqg->dispatched &&
2714 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
Jens Axboecaaa5f92006-06-16 11:23:00 +02002715 cfqq = NULL;
2716 goto keep_queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02002717 }
2718
Jens Axboe3b181522005-06-27 10:56:24 +02002719expire:
Vivek Goyale5ff0822010-04-26 19:25:11 +02002720 cfq_slice_expired(cfqd, 0);
Jens Axboe3b181522005-06-27 10:56:24 +02002721new_queue:
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002722 /*
2723 * Current queue expired. Check if we have to switch to a new
2724 * service tree
2725 */
2726 if (!new_cfqq)
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002727 cfq_choose_cfqg(cfqd);
Corrado Zoccolo718eee02009-10-26 22:45:29 +01002728
Jens Axboea36e71f2009-04-15 12:15:11 +02002729 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002730keep_queue:
Jens Axboe3b181522005-06-27 10:56:24 +02002731 return cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02002732}
2733
Jens Axboefebffd62008-01-28 13:19:43 +01002734static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
Jens Axboed9e76202007-04-20 14:27:50 +02002735{
2736 int dispatched = 0;
2737
2738 while (cfqq->next_rq) {
2739 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2740 dispatched++;
2741 }
2742
2743 BUG_ON(!list_empty(&cfqq->fifo));
Vivek Goyalf04a6422009-12-03 12:59:40 -05002744
2745 /* By default cfqq is not expired if it is empty. Do it explicitly */
Vivek Goyale5ff0822010-04-26 19:25:11 +02002746 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
Jens Axboed9e76202007-04-20 14:27:50 +02002747 return dispatched;
2748}
2749
Jens Axboe498d3aa22007-04-26 12:54:48 +02002750/*
2751 * Drain our current requests. Used for barriers and when switching
2752 * io schedulers on-the-fly.
2753 */
Jens Axboed9e76202007-04-20 14:27:50 +02002754static int cfq_forced_dispatch(struct cfq_data *cfqd)
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01002755{
Jens Axboe08717142008-01-28 11:38:15 +01002756 struct cfq_queue *cfqq;
Jens Axboed9e76202007-04-20 14:27:50 +02002757 int dispatched = 0;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05002758
Divyesh Shah3440c492010-04-09 09:29:57 +02002759 /* Expire the timeslice of the current active queue first */
Vivek Goyale5ff0822010-04-26 19:25:11 +02002760 cfq_slice_expired(cfqd, 0);
Divyesh Shah3440c492010-04-09 09:29:57 +02002761 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2762 __cfq_set_active_queue(cfqd, cfqq);
Vivek Goyalf04a6422009-12-03 12:59:40 -05002763 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
Divyesh Shah3440c492010-04-09 09:29:57 +02002764 }
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01002765
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01002766 BUG_ON(cfqd->busy_queues);
2767
Jeff Moyer69237152009-06-12 15:29:30 +02002768 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
Tejun Heo1b5ed5e12005-11-10 08:49:19 +01002769 return dispatched;
2770}
2771
Shaohua Liabc3c742010-03-01 09:20:54 +01002772static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2773 struct cfq_queue *cfqq)
2774{
2775 /* the queue hasn't finished any request, can't estimate */
2776 if (cfq_cfqq_slice_new(cfqq))
Shaohua Lic1e44752010-11-08 15:01:02 +01002777 return true;
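	/*
	 * Assume each dispatched request needs roughly one cfq_slice_idle
	 * to complete; would that overrun the end of the slice?
	 */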
Shaohua Liabc3c742010-03-01 09:20:54 +01002778 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2779 cfqq->slice_end))
Shaohua Lic1e44752010-11-08 15:01:02 +01002780 return true;
Shaohua Liabc3c742010-03-01 09:20:54 +01002781
Shaohua Lic1e44752010-11-08 15:01:02 +01002782 return false;
Shaohua Liabc3c742010-03-01 09:20:54 +01002783}
2784
Jens Axboe0b182d62009-10-06 20:49:37 +02002785static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
Jens Axboe2f5cb732009-04-07 08:51:19 +02002786{
Jens Axboe2f5cb732009-04-07 08:51:19 +02002787 unsigned int max_dispatch;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
Jens Axboe2f5cb732009-04-07 08:51:19 +02002789 /*
Jens Axboe5ad531d2009-07-03 12:57:48 +02002790 * Drain async requests before we start sync IO
2791 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002792 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
Jens Axboe0b182d62009-10-06 20:49:37 +02002793 return false;
Jens Axboe5ad531d2009-07-03 12:57:48 +02002794
2795 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02002796 * If this is an async queue and we have sync IO in flight, let it wait
2797 */
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01002798 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02002799 return false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02002800
Shaohua Liabc3c742010-03-01 09:20:54 +01002801 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
Jens Axboe2f5cb732009-04-07 08:51:19 +02002802 if (cfq_class_idle(cfqq))
2803 max_dispatch = 1;
2804
2805 /*
2806 * Does this cfqq already have too much IO in flight?
2807 */
2808 if (cfqq->dispatched >= max_dispatch) {
Shaohua Lief8a41d2011-03-07 09:26:29 +01002809 bool promote_sync = false;
Jens Axboe2f5cb732009-04-07 08:51:19 +02002810 /*
2811 * idle queue must always only have a single IO in flight
2812 */
Jens Axboe3ed9a292007-04-23 08:33:33 +02002813 if (cfq_class_idle(cfqq))
Jens Axboe0b182d62009-10-06 20:49:37 +02002814 return false;
Jens Axboe3ed9a292007-04-23 08:33:33 +02002815
Jens Axboe2f5cb732009-04-07 08:51:19 +02002816 /*
Li, Shaohuac4ade942011-03-23 08:30:34 +01002817		 * If there is only one sync queue,
2818		 * we can ignore the async queues here and give the sync
Shaohua Lief8a41d2011-03-07 09:26:29 +01002819		 * queue no dispatch limit. The reason is that a sync queue can
2820		 * preempt an async queue, so limiting the sync queue doesn't make
2821		 * sense. This is useful for the aiostress test.
2822 */
Li, Shaohuac4ade942011-03-23 08:30:34 +01002823 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2824 promote_sync = true;
Shaohua Lief8a41d2011-03-07 09:26:29 +01002825
2826 /*
Jens Axboe2f5cb732009-04-07 08:51:19 +02002827 * We have other queues, don't allow more IO from this one
2828 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01002829 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2830 !promote_sync)
Jens Axboe0b182d62009-10-06 20:49:37 +02002831 return false;
Jens Axboe9ede2092007-01-19 12:11:44 +11002832
Jens Axboe2f5cb732009-04-07 08:51:19 +02002833 /*
Shaohua Li474b18c2009-12-03 12:58:05 +01002834 * Sole queue user, no limit
Vivek Goyal365722b2009-10-03 15:21:27 +02002835 */
Shaohua Lief8a41d2011-03-07 09:26:29 +01002836 if (cfqd->busy_queues == 1 || promote_sync)
Shaohua Liabc3c742010-03-01 09:20:54 +01002837 max_dispatch = -1;
2838 else
2839 /*
2840 * Normally we start throttling cfqq when cfq_quantum/2
2841 * requests have been dispatched. But we can drive
2842			 * deeper queue depths at the beginning of the slice,
2843			 * subject to the upper limit of cfq_quantum.
2844			 */
2845 max_dispatch = cfqd->cfq_quantum;
Jens Axboe8e296752009-10-03 16:26:03 +02002846 }
2847
2848 /*
2849 * Async queues must wait a bit before being allowed dispatch.
2850 * We also ramp up the dispatch depth gradually for async IO,
2851 * based on the last sync IO we serviced
2852 */
Jens Axboe963b72f2009-10-03 19:42:18 +02002853 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
Corrado Zoccolo573412b2009-12-06 11:48:52 +01002854 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
Jens Axboe8e296752009-10-03 16:26:03 +02002855 unsigned int depth;
Vivek Goyal365722b2009-10-03 15:21:27 +02002856
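		/*
		 * Grow the allowed depth by one for every sync slice
		 * (cfq_slice[1]) that has passed since the last delayed
		 * sync IO.
		 */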
Jens Axboe61f0c1d2009-10-03 19:46:03 +02002857 depth = last_sync / cfqd->cfq_slice[1];
Jens Axboee00c54c2009-10-04 20:36:19 +02002858 if (!depth && !cfqq->dispatched)
2859 depth = 1;
Jens Axboe8e296752009-10-03 16:26:03 +02002860 if (depth < max_dispatch)
2861 max_dispatch = depth;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 }
2863
Jens Axboe0b182d62009-10-06 20:49:37 +02002864 /*
2865 * If we're below the current max, allow a dispatch
2866 */
2867 return cfqq->dispatched < max_dispatch;
2868}
2869
2870/*
2871 * Dispatch a request from cfqq, moving it to the request queue
2872 * dispatch list.
2873 */
2874static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2875{
2876 struct request *rq;
2877
2878 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2879
2880 if (!cfq_may_dispatch(cfqd, cfqq))
2881 return false;
2882
2883 /*
2884	 * follow the expired path, else take the next available request
2885 */
2886 rq = cfq_check_fifo(cfqq);
2887 if (!rq)
2888 rq = cfqq->next_rq;
2889
2890 /*
2891 * insert request into driver dispatch list
2892 */
2893 cfq_dispatch_insert(cfqd->queue, rq);
2894
2895 if (!cfqd->active_cic) {
Tejun Heoc5869802011-12-14 00:33:41 +01002896 struct cfq_io_cq *cic = RQ_CIC(rq);
Jens Axboe0b182d62009-10-06 20:49:37 +02002897
Tejun Heoc5869802011-12-14 00:33:41 +01002898 atomic_long_inc(&cic->icq.ioc->refcount);
Jens Axboe0b182d62009-10-06 20:49:37 +02002899 cfqd->active_cic = cic;
2900 }
2901
2902 return true;
2903}
2904
2905/*
2906 * Find the cfqq that we need to service and move a request from that to the
2907 * dispatch list
2908 */
2909static int cfq_dispatch_requests(struct request_queue *q, int force)
2910{
2911 struct cfq_data *cfqd = q->elevator->elevator_data;
2912 struct cfq_queue *cfqq;
2913
2914 if (!cfqd->busy_queues)
2915 return 0;
2916
2917 if (unlikely(force))
2918 return cfq_forced_dispatch(cfqd);
2919
2920 cfqq = cfq_select_queue(cfqd);
2921 if (!cfqq)
Jens Axboe8e296752009-10-03 16:26:03 +02002922 return 0;
2923
Jens Axboe2f5cb732009-04-07 08:51:19 +02002924 /*
Jens Axboe0b182d62009-10-06 20:49:37 +02002925 * Dispatch a request from this cfqq, if it is allowed
Jens Axboe2f5cb732009-04-07 08:51:19 +02002926 */
Jens Axboe0b182d62009-10-06 20:49:37 +02002927 if (!cfq_dispatch_request(cfqd, cfqq))
2928 return 0;
2929
Jens Axboe2f5cb732009-04-07 08:51:19 +02002930 cfqq->slice_dispatch++;
Jens Axboeb0291952009-04-07 11:38:31 +02002931 cfq_clear_cfqq_must_dispatch(cfqq);
Jens Axboe2f5cb732009-04-07 08:51:19 +02002932
2933 /*
2934 * expire an async queue immediately if it has used up its slice. idle
2935 * queue always expire after 1 dispatch round.
2936 */
2937 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2938 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2939 cfq_class_idle(cfqq))) {
2940 cfqq->slice_end = jiffies + 1;
Vivek Goyale5ff0822010-04-26 19:25:11 +02002941 cfq_slice_expired(cfqd, 0);
Jens Axboe2f5cb732009-04-07 08:51:19 +02002942 }
2943
Shan Weib217a902009-09-01 10:06:42 +02002944 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
Jens Axboe2f5cb732009-04-07 08:51:19 +02002945 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946}
2947
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948/*
Jens Axboe5e705372006-07-13 12:39:25 +02002949 * task holds one reference to the queue, dropped when task exits. each rq
2950 * in-flight on this queue also holds a reference, dropped when rq is freed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 *
Vivek Goyalb1c35762009-12-03 12:59:47 -05002952 * Each cfq queue took a reference on the parent group. Drop it now.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 * queue lock must be held here.
2954 */
2955static void cfq_put_queue(struct cfq_queue *cfqq)
2956{
Jens Axboe22e2c502005-06-27 10:55:12 +02002957 struct cfq_data *cfqd = cfqq->cfqd;
Justin TerAvest0bbfeb82011-03-01 15:05:08 -05002958 struct cfq_group *cfqg;
Jens Axboe22e2c502005-06-27 10:55:12 +02002959
Shaohua Li30d7b942011-01-07 08:46:59 +01002960 BUG_ON(cfqq->ref <= 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961
Shaohua Li30d7b942011-01-07 08:46:59 +01002962 cfqq->ref--;
2963 if (cfqq->ref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 return;
2965
Jens Axboe7b679132008-05-30 12:23:07 +02002966 cfq_log_cfqq(cfqd, cfqq, "put_queue");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 BUG_ON(rb_first(&cfqq->sort_list));
Jens Axboe22e2c502005-06-27 10:55:12 +02002968 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
Vivek Goyalb1c35762009-12-03 12:59:47 -05002969 cfqg = cfqq->cfqg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970
Jens Axboe28f95cbc2007-01-19 12:09:53 +11002971 if (unlikely(cfqd->active_queue == cfqq)) {
Vivek Goyale5ff0822010-04-26 19:25:11 +02002972 __cfq_slice_expired(cfqd, cfqq, 0);
Jens Axboe23e018a2009-10-05 08:52:35 +02002973 cfq_schedule_dispatch(cfqd);
Jens Axboe28f95cbc2007-01-19 12:09:53 +11002974 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002975
Vivek Goyalf04a6422009-12-03 12:59:40 -05002976 BUG_ON(cfq_cfqq_on_rr(cfqq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977 kmem_cache_free(cfq_pool, cfqq);
Tejun Heoeb7d8c072012-03-23 14:02:53 +01002978 cfqg_put(cfqg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979}
2980
Shaohua Lid02a2c02010-05-25 10:16:53 +02002981static void cfq_put_cooperator(struct cfq_queue *cfqq)
Jens Axboe89850f72006-07-22 16:48:31 +02002982{
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002983 struct cfq_queue *__cfqq, *next;
2984
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04002985 /*
2986 * If this queue was scheduled to merge with another queue, be
2987 * sure to drop the reference taken on that queue (and others in
2988 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
2989 */
2990 __cfqq = cfqq->new_cfqq;
2991 while (__cfqq) {
2992 if (__cfqq == cfqq) {
2993 WARN(1, "cfqq->new_cfqq loop detected\n");
2994 break;
2995 }
2996 next = __cfqq->new_cfqq;
2997 cfq_put_queue(__cfqq);
2998 __cfqq = next;
2999 }
Shaohua Lid02a2c02010-05-25 10:16:53 +02003000}
3001
3002static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3003{
3004 if (unlikely(cfqq == cfqd->active_queue)) {
3005 __cfq_slice_expired(cfqd, cfqq, 0);
3006 cfq_schedule_dispatch(cfqd);
3007 }
3008
3009 cfq_put_cooperator(cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003010
Jens Axboe89850f72006-07-22 16:48:31 +02003011 cfq_put_queue(cfqq);
3012}
3013
Tejun Heo9b84cac2011-12-14 00:33:42 +01003014static void cfq_init_icq(struct io_cq *icq)
3015{
3016 struct cfq_io_cq *cic = icq_to_cic(icq);
3017
3018 cic->ttime.last_end_request = jiffies;
3019}
3020
Tejun Heoc5869802011-12-14 00:33:41 +01003021static void cfq_exit_icq(struct io_cq *icq)
Jens Axboe89850f72006-07-22 16:48:31 +02003022{
Tejun Heoc5869802011-12-14 00:33:41 +01003023 struct cfq_io_cq *cic = icq_to_cic(icq);
Tejun Heo283287a2011-12-14 00:33:38 +01003024 struct cfq_data *cfqd = cic_to_cfqd(cic);
Fabio Checconi4faa3c82008-04-10 08:28:01 +02003025
Jens Axboeff6657c2009-04-08 10:58:57 +02003026 if (cic->cfqq[BLK_RW_ASYNC]) {
3027 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3028 cic->cfqq[BLK_RW_ASYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02003029 }
3030
Jens Axboeff6657c2009-04-08 10:58:57 +02003031 if (cic->cfqq[BLK_RW_SYNC]) {
3032 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3033 cic->cfqq[BLK_RW_SYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02003034 }
Jens Axboe89850f72006-07-22 16:48:31 +02003035}
3036
Tejun Heoabede6d2012-03-19 15:10:57 -07003037static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02003038{
3039 struct task_struct *tsk = current;
3040 int ioprio_class;
3041
Jens Axboe3b181522005-06-27 10:56:24 +02003042 if (!cfq_cfqq_prio_changed(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02003043 return;
3044
Tejun Heo598971b2012-03-19 15:10:58 -07003045 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02003046 switch (ioprio_class) {
Jens Axboefe094d92008-01-31 13:08:54 +01003047 default:
3048 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
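		/* fall through */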
3049 case IOPRIO_CLASS_NONE:
3050 /*
Jens Axboe6d63c272008-05-07 09:51:23 +02003051 * no prio set, inherit CPU scheduling settings
Jens Axboefe094d92008-01-31 13:08:54 +01003052 */
3053 cfqq->ioprio = task_nice_ioprio(tsk);
Jens Axboe6d63c272008-05-07 09:51:23 +02003054 cfqq->ioprio_class = task_nice_ioclass(tsk);
Jens Axboefe094d92008-01-31 13:08:54 +01003055 break;
3056 case IOPRIO_CLASS_RT:
Tejun Heo598971b2012-03-19 15:10:58 -07003057 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003058 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3059 break;
3060 case IOPRIO_CLASS_BE:
Tejun Heo598971b2012-03-19 15:10:58 -07003061 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Jens Axboefe094d92008-01-31 13:08:54 +01003062 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3063 break;
3064 case IOPRIO_CLASS_IDLE:
3065 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3066 cfqq->ioprio = 7;
3067 cfq_clear_cfqq_idle_window(cfqq);
3068 break;
Jens Axboe22e2c502005-06-27 10:55:12 +02003069 }
3070
3071 /*
3072 * keep track of original prio settings in case we have to temporarily
3073 * elevate the priority of this queue
3074 */
3075 cfqq->org_ioprio = cfqq->ioprio;
Jens Axboe3b181522005-06-27 10:56:24 +02003076 cfq_clear_cfqq_prio_changed(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003077}
3078
Tejun Heo598971b2012-03-19 15:10:58 -07003079static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
Jens Axboe22e2c502005-06-27 10:55:12 +02003080{
Tejun Heo598971b2012-03-19 15:10:58 -07003081 int ioprio = cic->icq.ioc->ioprio;
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003082 struct cfq_data *cfqd = cic_to_cfqd(cic);
Al Viro478a82b2006-03-18 13:25:24 -05003083 struct cfq_queue *cfqq;
Jens Axboe35e60772006-06-14 09:10:45 +02003084
Tejun Heo598971b2012-03-19 15:10:58 -07003085 /*
3086 * Check whether ioprio has changed. The condition may trigger
3087 * spuriously on a newly created cic but there's no harm.
3088 */
3089 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
Jens Axboecaaa5f92006-06-16 11:23:00 +02003090 return;
3091
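	/*
	 * Async queues are shared per (class, prio): switch to the one
	 * matching the new ioprio and drop the reference to the old one.
	 */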
Jens Axboeff6657c2009-04-08 10:58:57 +02003092 cfqq = cic->cfqq[BLK_RW_ASYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02003093 if (cfqq) {
3094 struct cfq_queue *new_cfqq;
Tejun Heoabede6d2012-03-19 15:10:57 -07003095 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3096 GFP_ATOMIC);
Jens Axboecaaa5f92006-06-16 11:23:00 +02003097 if (new_cfqq) {
Jens Axboeff6657c2009-04-08 10:58:57 +02003098 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
Jens Axboecaaa5f92006-06-16 11:23:00 +02003099 cfq_put_queue(cfqq);
3100 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003101 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02003102
Jens Axboeff6657c2009-04-08 10:58:57 +02003103 cfqq = cic->cfqq[BLK_RW_SYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02003104 if (cfqq)
3105 cfq_mark_cfqq_prio_changed(cfqq);
Tejun Heo598971b2012-03-19 15:10:58 -07003106
3107 cic->ioprio = ioprio;
Jens Axboe22e2c502005-06-27 10:55:12 +02003108}
3109
Jens Axboed5036d72009-06-26 10:44:34 +02003110static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboea6151c32009-10-07 20:02:57 +02003111 pid_t pid, bool is_sync)
Jens Axboed5036d72009-06-26 10:44:34 +02003112{
3113 RB_CLEAR_NODE(&cfqq->rb_node);
3114 RB_CLEAR_NODE(&cfqq->p_node);
3115 INIT_LIST_HEAD(&cfqq->fifo);
3116
Shaohua Li30d7b942011-01-07 08:46:59 +01003117 cfqq->ref = 0;
Jens Axboed5036d72009-06-26 10:44:34 +02003118 cfqq->cfqd = cfqd;
3119
3120 cfq_mark_cfqq_prio_changed(cfqq);
3121
3122 if (is_sync) {
3123 if (!cfq_class_idle(cfqq))
3124 cfq_mark_cfqq_idle_window(cfqq);
3125 cfq_mark_cfqq_sync(cfqq);
3126 }
3127 cfqq->pid = pid;
3128}
3129
Vivek Goyal246103332009-12-03 12:59:51 -05003130#ifdef CONFIG_CFQ_GROUP_IOSCHED
Tejun Heo598971b2012-03-19 15:10:58 -07003131static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
Vivek Goyal246103332009-12-03 12:59:51 -05003132{
Konstantin Khlebnikovbca4b912010-05-20 23:21:34 +04003133 struct cfq_data *cfqd = cic_to_cfqd(cic);
Tejun Heo598971b2012-03-19 15:10:58 -07003134 struct cfq_queue *sync_cfqq;
3135 uint64_t id;
Vivek Goyal246103332009-12-03 12:59:51 -05003136
Tejun Heo598971b2012-03-19 15:10:58 -07003137 rcu_read_lock();
Tejun Heo3c798392012-04-16 13:57:25 -07003138 id = bio_blkcg(bio)->id;
Tejun Heo598971b2012-03-19 15:10:58 -07003139 rcu_read_unlock();
3140
3141 /*
3142 * Check whether blkcg has changed. The condition may trigger
3143 * spuriously on a newly created cic but there's no harm.
3144 */
3145 if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
Vivek Goyal246103332009-12-03 12:59:51 -05003146 return;
3147
Tejun Heo598971b2012-03-19 15:10:58 -07003148 sync_cfqq = cic_to_cfqq(cic, 1);
Vivek Goyal246103332009-12-03 12:59:51 -05003149 if (sync_cfqq) {
3150 /*
3151 * Drop reference to sync queue. A new sync queue will be
3152 * assigned in new group upon arrival of a fresh request.
3153 */
3154 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3155 cic_set_cfqq(cic, NULL, 1);
3156 cfq_put_queue(sync_cfqq);
3157 }
Tejun Heo598971b2012-03-19 15:10:58 -07003158
3159 cic->blkcg_id = id;
Vivek Goyal246103332009-12-03 12:59:51 -05003160}
Tejun Heo598971b2012-03-19 15:10:58 -07003161#else
3162static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
Vivek Goyal246103332009-12-03 12:59:51 -05003163#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3164
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165static struct cfq_queue *
Tejun Heoabede6d2012-03-19 15:10:57 -07003166cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3167 struct bio *bio, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168{
Tejun Heo3c798392012-04-16 13:57:25 -07003169 struct blkcg *blkcg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170 struct cfq_queue *cfqq, *new_cfqq = NULL;
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003171 struct cfq_group *cfqg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172
3173retry:
Tejun Heo2a7f1242012-03-05 13:15:01 -08003174 rcu_read_lock();
3175
Tejun Heo3c798392012-04-16 13:57:25 -07003176 blkcg = bio_blkcg(bio);
Tejun Heocd1604f2012-03-05 13:15:06 -08003177 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
Vasily Tarasov91fac312007-04-25 12:29:51 +02003178 cfqq = cic_to_cfqq(cic, is_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179
Jens Axboe6118b702009-06-30 09:34:12 +02003180 /*
3181 * Always try a new alloc if we fell back to the OOM cfqq
3182 * originally, since it should just be a temporary situation.
3183 */
3184 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3185 cfqq = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 if (new_cfqq) {
3187 cfqq = new_cfqq;
3188 new_cfqq = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02003189 } else if (gfp_mask & __GFP_WAIT) {
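			/*
			 * Blocking allocation: drop the locks while
			 * sleeping in the allocator, then retry the
			 * lookup since things may have changed.
			 */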
Tejun Heo2a7f1242012-03-05 13:15:01 -08003190 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 spin_unlock_irq(cfqd->queue->queue_lock);
Christoph Lameter94f60302007-07-17 04:03:29 -07003192 new_cfqq = kmem_cache_alloc_node(cfq_pool,
Jens Axboe6118b702009-06-30 09:34:12 +02003193 gfp_mask | __GFP_ZERO,
Christoph Lameter94f60302007-07-17 04:03:29 -07003194 cfqd->queue->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 spin_lock_irq(cfqd->queue->queue_lock);
Jens Axboe6118b702009-06-30 09:34:12 +02003196 if (new_cfqq)
3197 goto retry;
Jens Axboe22e2c502005-06-27 10:55:12 +02003198 } else {
Christoph Lameter94f60302007-07-17 04:03:29 -07003199 cfqq = kmem_cache_alloc_node(cfq_pool,
3200 gfp_mask | __GFP_ZERO,
3201 cfqd->queue->node);
Kiyoshi Ueda db3b5842005-06-17 16:15:10 +02003202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203
Jens Axboe6118b702009-06-30 09:34:12 +02003204 if (cfqq) {
3205 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
Tejun Heoabede6d2012-03-19 15:10:57 -07003206 cfq_init_prio_data(cfqq, cic);
Vivek Goyalcdb16e82009-12-03 12:59:38 -05003207 cfq_link_cfqq_cfqg(cfqq, cfqg);
Jens Axboe6118b702009-06-30 09:34:12 +02003208 cfq_log_cfqq(cfqd, cfqq, "alloced");
3209 } else
3210 cfqq = &cfqd->oom_cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 }
3212
3213 if (new_cfqq)
3214 kmem_cache_free(cfq_pool, new_cfqq);
3215
Tejun Heo2a7f1242012-03-05 13:15:01 -08003216 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217 return cfqq;
3218}
3219
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003220static struct cfq_queue **
3221cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3222{
Jens Axboefe094d92008-01-31 13:08:54 +01003223 switch (ioprio_class) {
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003224 case IOPRIO_CLASS_RT:
3225 return &cfqd->async_cfqq[0][ioprio];
Tejun Heo598971b2012-03-19 15:10:58 -07003226 case IOPRIO_CLASS_NONE:
3227 ioprio = IOPRIO_NORM;
3228 /* fall through */
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003229 case IOPRIO_CLASS_BE:
3230 return &cfqd->async_cfqq[1][ioprio];
3231 case IOPRIO_CLASS_IDLE:
3232 return &cfqd->async_idle_cfqq;
3233 default:
3234 BUG();
3235 }
3236}
3237
Jens Axboe15c31be2007-07-10 13:43:25 +02003238static struct cfq_queue *
Tejun Heoabede6d2012-03-19 15:10:57 -07003239cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
Tejun Heo4f85cb92012-03-05 13:15:28 -08003240 struct bio *bio, gfp_t gfp_mask)
Jens Axboe15c31be2007-07-10 13:43:25 +02003241{
Tejun Heo598971b2012-03-19 15:10:58 -07003242 const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3243 const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003244 struct cfq_queue **async_cfqq = NULL;
Jens Axboe15c31be2007-07-10 13:43:25 +02003245 struct cfq_queue *cfqq = NULL;
3246
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003247 if (!is_sync) {
3248 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3249 cfqq = *async_cfqq;
3250 }
3251
Jens Axboe6118b702009-06-30 09:34:12 +02003252 if (!cfqq)
Tejun Heoabede6d2012-03-19 15:10:57 -07003253 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
Jens Axboe15c31be2007-07-10 13:43:25 +02003254
3255 /*
3256 * pin the queue now that it's allocated, scheduler exit will prune it
3257 */
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003258 if (!is_sync && !(*async_cfqq)) {
Shaohua Li30d7b942011-01-07 08:46:59 +01003259 cfqq->ref++;
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003260 *async_cfqq = cfqq;
Jens Axboe15c31be2007-07-10 13:43:25 +02003261 }
3262
Shaohua Li30d7b942011-01-07 08:46:59 +01003263 cfqq->ref++;
Jens Axboe15c31be2007-07-10 13:43:25 +02003264 return cfqq;
3265}
3266
Jens Axboe22e2c502005-06-27 10:55:12 +02003267static void
Shaohua Li383cd722011-07-12 14:24:35 +02003268__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
Jens Axboe22e2c502005-06-27 10:55:12 +02003269{
Shaohua Li383cd722011-07-12 14:24:35 +02003270 unsigned long elapsed = jiffies - ttime->last_end_request;
3271 elapsed = min(elapsed, 2UL * slice_idle);
Jens Axboe22e2c502005-06-27 10:55:12 +02003272
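	/*
	 * 7/8 exponentially decaying averages, scaled by 256 for
	 * fixed-point precision; the +128 rounds the mean's division.
	 */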
Shaohua Li383cd722011-07-12 14:24:35 +02003273 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3274 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3275 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3276}
3277
3278static void
3279cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Tejun Heoc5869802011-12-14 00:33:41 +01003280 struct cfq_io_cq *cic)
Shaohua Li383cd722011-07-12 14:24:35 +02003281{
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02003282 if (cfq_cfqq_sync(cfqq)) {
Shaohua Li383cd722011-07-12 14:24:35 +02003283 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02003284 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3285 cfqd->cfq_slice_idle);
3286 }
Shaohua Li7700fc42011-07-12 14:24:56 +02003287#ifdef CONFIG_CFQ_GROUP_IOSCHED
3288 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3289#endif
Jens Axboe22e2c502005-06-27 10:55:12 +02003290}
3291
Jens Axboe206dc692006-03-28 13:03:44 +02003292static void
Jeff Moyerb2c18e12009-10-23 17:14:49 -04003293cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Jens Axboe6d048f52007-04-25 12:44:27 +02003294 struct request *rq)
Jens Axboe206dc692006-03-28 13:03:44 +02003295{
Corrado Zoccolo3dde36d2010-02-27 19:45:39 +01003296 sector_t sdist = 0;
Corrado Zoccolo41647e72010-02-27 19:45:40 +01003297 sector_t n_sec = blk_rq_sectors(rq);
Corrado Zoccolo3dde36d2010-02-27 19:45:39 +01003298 if (cfqq->last_request_pos) {
3299 if (cfqq->last_request_pos < blk_rq_pos(rq))
3300 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3301 else
3302 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3303 }
Jens Axboe206dc692006-03-28 13:03:44 +02003304
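	/*
	 * seek_history is a 32-bit shift register of per-request seeky
	 * bits; CFQQ_SEEKY() fires once more than 32/8 of them are set.
	 */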
Corrado Zoccolo3dde36d2010-02-27 19:45:39 +01003305 cfqq->seek_history <<= 1;
Corrado Zoccolo41647e72010-02-27 19:45:40 +01003306 if (blk_queue_nonrot(cfqd->queue))
3307 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3308 else
3309 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
Jens Axboe206dc692006-03-28 13:03:44 +02003310}
Jens Axboe22e2c502005-06-27 10:55:12 +02003311
3312/*
3313 * Disable idle window if the process thinks too long or seeks so much that
3314 * it doesn't matter
3315 */
3316static void
3317cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
Tejun Heoc5869802011-12-14 00:33:41 +01003318 struct cfq_io_cq *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02003319{
Jens Axboe7b679132008-05-30 12:23:07 +02003320 int old_idle, enable_idle;
Jens Axboe1be92f22007-04-19 14:32:26 +02003321
Jens Axboe08717142008-01-28 11:38:15 +01003322 /*
3323 * Don't idle for async or idle io prio class
3324 */
3325 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
Jens Axboe1be92f22007-04-19 14:32:26 +02003326 return;
3327
Jens Axboec265a7f2008-06-26 13:49:33 +02003328 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003329
Corrado Zoccolo76280af2009-11-26 10:02:58 +01003330 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3331 cfq_mark_cfqq_deep(cfqq);
3332
Corrado Zoccolo749ef9f2010-09-20 15:24:50 +02003333 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3334 enable_idle = 0;
Tejun Heof6e8d012012-03-05 13:15:26 -08003335 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
Tejun Heoc5869802011-12-14 00:33:41 +01003336 !cfqd->cfq_slice_idle ||
3337 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
Jens Axboe22e2c502005-06-27 10:55:12 +02003338 enable_idle = 0;
Shaohua Li383cd722011-07-12 14:24:35 +02003339 else if (sample_valid(cic->ttime.ttime_samples)) {
3340 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
Jens Axboe22e2c502005-06-27 10:55:12 +02003341 enable_idle = 0;
3342 else
3343 enable_idle = 1;
3344 }
3345
Jens Axboe7b679132008-05-30 12:23:07 +02003346 if (old_idle != enable_idle) {
3347 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3348 if (enable_idle)
3349 cfq_mark_cfqq_idle_window(cfqq);
3350 else
3351 cfq_clear_cfqq_idle_window(cfqq);
3352 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003353}
3354
Jens Axboe22e2c502005-06-27 10:55:12 +02003355/*
3356 * Check if new_cfqq should preempt the currently active queue. Return false
3357 * for no or if we aren't sure; true will cause a preempt.
3358 */
Jens Axboea6151c32009-10-07 20:02:57 +02003359static bool
Jens Axboe22e2c502005-06-27 10:55:12 +02003360cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
Jens Axboe5e705372006-07-13 12:39:25 +02003361 struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02003362{
Jens Axboe6d048f52007-04-25 12:44:27 +02003363 struct cfq_queue *cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02003364
Jens Axboe6d048f52007-04-25 12:44:27 +02003365 cfqq = cfqd->active_queue;
3366 if (!cfqq)
Jens Axboea6151c32009-10-07 20:02:57 +02003367 return false;
Jens Axboe22e2c502005-06-27 10:55:12 +02003368
Jens Axboe6d048f52007-04-25 12:44:27 +02003369 if (cfq_class_idle(new_cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02003370 return false;
Jens Axboe22e2c502005-06-27 10:55:12 +02003371
3372 if (cfq_class_idle(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02003373 return true;
Jens Axboe1e3335d2007-02-14 19:59:49 +01003374
Jens Axboe22e2c502005-06-27 10:55:12 +02003375 /*
Divyesh Shah875feb62010-01-06 18:58:20 -08003376 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3377 */
3378 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3379 return false;
3380
3381 /*
Jens Axboe374f84a2006-07-23 01:42:19 +02003382 * if the new request is sync, but the currently running queue is
3383 * not, let the sync request have priority.
3384 */
Jens Axboe5e705372006-07-13 12:39:25 +02003385 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02003386 return true;
Jens Axboe1e3335d2007-02-14 19:59:49 +01003387
Vivek Goyal8682e1f2009-12-03 12:59:50 -05003388 if (new_cfqq->cfqg != cfqq->cfqg)
3389 return false;
3390
3391 if (cfq_slice_used(cfqq))
3392 return true;
3393
3394 /* Allow preemption only if we are idling on sync-noidle tree */
3395 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3396 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3397 new_cfqq->service_tree->count == 2 &&
3398 RB_EMPTY_ROOT(&cfqq->sort_list))
3399 return true;
3400
Jens Axboe374f84a2006-07-23 01:42:19 +02003401 /*
Jens Axboeb53d1ed2011-08-19 08:34:48 +02003402 * So both queues are sync. Let the new request get disk time if
3403 * it's a metadata request and the current queue is doing regular IO.
3404 */
Christoph Hellwig65299a32011-08-23 14:50:29 +02003405 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
Jens Axboeb53d1ed2011-08-19 08:34:48 +02003406 return true;
3407
3408 /*
Divyesh Shah3a9a3f62009-01-30 12:46:41 +01003409	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
3410 */
3411 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02003412 return true;
Divyesh Shah3a9a3f62009-01-30 12:46:41 +01003413
Shaohua Lid2d59e12010-11-08 15:01:03 +01003414	/* The active queue is empty and should not be idling; allow preemption */
3415 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3416 return true;
3417
Jens Axboe1e3335d2007-02-14 19:59:49 +01003418 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
Jens Axboea6151c32009-10-07 20:02:57 +02003419 return false;
Jens Axboe1e3335d2007-02-14 19:59:49 +01003420
3421 /*
3422 * if this request is as-good as one we would expect from the
3423 * current cfqq, let it preempt
3424 */
Shaohua Lie9ce3352010-03-19 08:03:04 +01003425 if (cfq_rq_close(cfqd, cfqq, rq))
Jens Axboea6151c32009-10-07 20:02:57 +02003426 return true;
Jens Axboe1e3335d2007-02-14 19:59:49 +01003427
Jens Axboea6151c32009-10-07 20:02:57 +02003428 return false;
Jens Axboe22e2c502005-06-27 10:55:12 +02003429}
3430
3431/*
3432 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3433 * let it have half of its nominal slice.
3434 */
3435static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3436{
Shaohua Lidf0793a2012-01-19 09:20:09 +01003437 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3438
Jens Axboe7b679132008-05-30 12:23:07 +02003439 cfq_log_cfqq(cfqd, cfqq, "preempt");
Shaohua Lidf0793a2012-01-19 09:20:09 +01003440 cfq_slice_expired(cfqd, 1);
Jens Axboe22e2c502005-06-27 10:55:12 +02003441
Jens Axboebf572252006-07-19 20:29:12 +02003442 /*
Shaohua Lif8ae6e32011-01-14 08:41:02 +01003443	 * The workload type changed; don't save the slice, otherwise the
3444	 * preemption doesn't take effect
3445 */
Shaohua Lidf0793a2012-01-19 09:20:09 +01003446 if (old_type != cfqq_type(cfqq))
Shaohua Lif8ae6e32011-01-14 08:41:02 +01003447 cfqq->cfqg->saved_workload_slice = 0;
3448
3449 /*
Jens Axboebf572252006-07-19 20:29:12 +02003450	 * Put the new queue at the front of the current list,
3451 * so we know that it will be selected next.
3452 */
3453 BUG_ON(!cfq_cfqq_on_rr(cfqq));
Jens Axboeedd75ff2007-04-19 12:03:34 +02003454
3455 cfq_service_tree_add(cfqd, cfqq, 1);
Justin TerAvesteda5e0c2011-03-22 21:26:49 +01003456
Justin TerAvest62a37f62011-03-23 08:25:44 +01003457 cfqq->slice_end = 0;
3458 cfq_mark_cfqq_slice_new(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003459}
3460
3461/*
Jens Axboe5e705372006-07-13 12:39:25 +02003462 * Called when a new fs request (rq) is added (to cfqq). Check if there's
Jens Axboe22e2c502005-06-27 10:55:12 +02003463 * something we should do about it
3464 */
3465static void
Jens Axboe5e705372006-07-13 12:39:25 +02003466cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3467 struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02003468{
Tejun Heoc5869802011-12-14 00:33:41 +01003469 struct cfq_io_cq *cic = RQ_CIC(rq);
Jens Axboe12e9fdd2006-06-01 10:09:56 +02003470
Aaron Carroll45333d52008-08-26 15:52:36 +02003471 cfqd->rq_queued++;
Christoph Hellwig65299a32011-08-23 14:50:29 +02003472 if (rq->cmd_flags & REQ_PRIO)
3473 cfqq->prio_pending++;
Jens Axboe374f84a2006-07-23 01:42:19 +02003474
Shaohua Li383cd722011-07-12 14:24:35 +02003475 cfq_update_io_thinktime(cfqd, cfqq, cic);
Jeff Moyerb2c18e12009-10-23 17:14:49 -04003476 cfq_update_io_seektime(cfqd, cfqq, rq);
Jens Axboe9c2c38a2005-08-24 14:57:54 +02003477 cfq_update_idle_window(cfqd, cfqq, cic);
3478
Jeff Moyerb2c18e12009-10-23 17:14:49 -04003479 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003480
3481 if (cfqq == cfqd->active_queue) {
3482 /*
Jens Axboeb0291952009-04-07 11:38:31 +02003483 * Remember that we saw a request from this process, but
3484 * don't start queuing just yet. Otherwise we risk seeing lots
3485 * of tiny requests, because we disrupt the normal plugging
Jens Axboed6ceb252009-04-14 14:18:16 +02003486 * and merging. If the request is already larger than a single
3487 * page, let it rip immediately. For that case we assume that
Jens Axboe2d870722009-04-15 12:12:46 +02003488 * merging is already done. Ditto for a busy system that
3489 * has other work pending, don't risk delaying until the
3490 * idle timer unplug to continue working.
Jens Axboe22e2c502005-06-27 10:55:12 +02003491 */
Jens Axboed6ceb252009-04-14 14:18:16 +02003492 if (cfq_cfqq_wait_request(cfqq)) {
Jens Axboe2d870722009-04-15 12:12:46 +02003493 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3494 cfqd->busy_queues > 1) {
Divyesh Shah812df482010-04-08 21:15:35 -07003495 cfq_del_timer(cfqd, cfqq);
Gui Jianfeng554554f2009-12-10 09:38:39 +01003496 cfq_clear_cfqq_wait_request(cfqq);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02003497 __blk_run_queue(cfqd->queue);
Divyesh Shaha11cdaa2010-04-13 19:59:17 +02003498 } else {
Tejun Heo155fead2012-04-01 14:38:44 -07003499 cfqg_stats_update_idle_time(cfqq->cfqg);
Vivek Goyalbf7919372009-12-03 12:59:37 -05003500 cfq_mark_cfqq_must_dispatch(cfqq);
Divyesh Shaha11cdaa2010-04-13 19:59:17 +02003501 }
Jens Axboed6ceb252009-04-14 14:18:16 +02003502 }
Jens Axboe5e705372006-07-13 12:39:25 +02003503 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
Jens Axboe22e2c502005-06-27 10:55:12 +02003504 /*
3505 * not the active queue - expire current slice if it is
3506	 * idle and has expired its mean thinktime, or this new queue
Divyesh Shah3a9a3f62009-01-30 12:46:41 +01003507 * has some old slice time left and is of higher priority or
3508 * this new queue is RT and the current one is BE
Jens Axboe22e2c502005-06-27 10:55:12 +02003509 */
3510 cfq_preempt_queue(cfqd, cfqq);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02003511 __blk_run_queue(cfqd->queue);
Jens Axboe22e2c502005-06-27 10:55:12 +02003512 }
3513}
3514
Jens Axboe165125e2007-07-24 09:28:11 +02003515static void cfq_insert_request(struct request_queue *q, struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02003516{
Jens Axboeb4878f22005-10-20 16:42:29 +02003517 struct cfq_data *cfqd = q->elevator->elevator_data;
Jens Axboe5e705372006-07-13 12:39:25 +02003518 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003519
Jens Axboe7b679132008-05-30 12:23:07 +02003520 cfq_log_cfqq(cfqd, cfqq, "insert_request");
Tejun Heoabede6d2012-03-19 15:10:57 -07003521 cfq_init_prio_data(cfqq, RQ_CIC(rq));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522
Jens Axboe30996f42009-10-05 11:03:39 +02003523 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
Jens Axboe22e2c502005-06-27 10:55:12 +02003524 list_add_tail(&rq->queuelist, &cfqq->fifo);
Corrado Zoccoloaa6f6a32009-10-26 22:44:33 +01003525 cfq_add_rq_rb(rq);
Tejun Heo155fead2012-04-01 14:38:44 -07003526 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3527 rq->cmd_flags);
Jens Axboe5e705372006-07-13 12:39:25 +02003528 cfq_rq_enqueued(cfqd, cfqq, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529}
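/*
 * Worked example of the fifo deadline above (a sketch; the figures are
 * assumed tunable settings, not values quoted from this file): with
 * HZ=1000 and fifo_expire_sync configured to 125 ms, a sync request
 * inserted at jiffies == t carries a fifo time of t + 125.  If it is
 * still queued once that passes, the dispatch path's fifo check serves
 * it ahead of sector-ordered requests to bound its latency.
 */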
3530
Aaron Carroll45333d52008-08-26 15:52:36 +02003531/*
3532 * Update hw_tag based on peak queue depth over 50 samples under
3533 * sufficient load.
3534 */
3535static void cfq_update_hw_tag(struct cfq_data *cfqd)
3536{
Shaohua Li1a1238a2009-10-27 08:46:23 +01003537 struct cfq_queue *cfqq = cfqd->active_queue;
3538
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003539 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3540 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
Corrado Zoccoloe459dd02009-11-26 10:02:57 +01003541
3542 if (cfqd->hw_tag == 1)
3543 return;
Aaron Carroll45333d52008-08-26 15:52:36 +02003544
3545 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003546 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
Aaron Carroll45333d52008-08-26 15:52:36 +02003547 return;
3548
Shaohua Li1a1238a2009-10-27 08:46:23 +01003549 /*
3550	 * If the active queue doesn't have enough requests and can idle, cfq
3551	 * might not dispatch sufficient requests to hardware. Don't zero hw_tag
3552	 * in this case.
3553 */
3554 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3555 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003556 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
Shaohua Li1a1238a2009-10-27 08:46:23 +01003557 return;
3558
Aaron Carroll45333d52008-08-26 15:52:36 +02003559 if (cfqd->hw_tag_samples++ < 50)
3560 return;
3561
Corrado Zoccoloe459dd02009-11-26 10:02:57 +01003562 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
Aaron Carroll45333d52008-08-26 15:52:36 +02003563 cfqd->hw_tag = 1;
3564 else
3565 cfqd->hw_tag = 0;
Aaron Carroll45333d52008-08-26 15:52:36 +02003566}
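/*
 * Worked example (sketch): if the observed driver depth never exceeds 2
 * across the first 50 qualifying samples, hw_tag_est_depth ends up at 2,
 * below CFQ_HW_QUEUE_MIN, so hw_tag becomes 0 and cfq assumes the device
 * has little or no useful command queueing.
 */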
3567
Vivek Goyal7667aa02009-12-08 17:52:58 -05003568static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3569{
Tejun Heoc5869802011-12-14 00:33:41 +01003570 struct cfq_io_cq *cic = cfqd->active_cic;
Vivek Goyal7667aa02009-12-08 17:52:58 -05003571
Justin TerAvest02a8f012011-02-09 14:20:03 +01003572 /* If the queue already has requests, don't wait */
3573 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3574 return false;
3575
Vivek Goyal7667aa02009-12-08 17:52:58 -05003576 /* If there are other queues in the group, don't wait */
3577 if (cfqq->cfqg->nr_cfqq > 1)
3578 return false;
3579
Shaohua Li7700fc42011-07-12 14:24:56 +02003580 /* the only queue in the group, but think time is big */
3581 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3582 return false;
3583
Vivek Goyal7667aa02009-12-08 17:52:58 -05003584 if (cfq_slice_used(cfqq))
3585 return true;
3586
3587 /* if slice left is less than think time, wait busy */
Shaohua Li383cd722011-07-12 14:24:35 +02003588 if (cic && sample_valid(cic->ttime.ttime_samples)
3589 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
Vivek Goyal7667aa02009-12-08 17:52:58 -05003590 return true;
3591
3592 /*
3593	 * If the think time is less than a jiffy, then ttime_mean=0 and the above
3594	 * will not be true. It might happen that the slice has not expired yet
3595 * but will expire soon (4-5 ns) during select_queue(). To cover the
3596 * case where think time is less than a jiffy, mark the queue wait
3597 * busy if only 1 jiffy is left in the slice.
3598 */
3599 if (cfqq->slice_end - jiffies == 1)
3600 return true;
3601
3602 return false;
3603}
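/*
 * Example of the sub-jiffy case handled above (sketch): with HZ=250 a
 * jiffy is 4 ms, so a task thinking for ~1 ms between requests has
 * ttime_mean rounded down to 0 and the think-time comparison never
 * fires; the slice_end - jiffies == 1 test is what still marks such a
 * queue wait-busy just before its slice runs out.
 */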
3604
Jens Axboe165125e2007-07-24 09:28:11 +02003605static void cfq_completed_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606{
Jens Axboe5e705372006-07-13 12:39:25 +02003607 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02003608 struct cfq_data *cfqd = cfqq->cfqd;
Jens Axboe5380a102006-07-13 12:37:56 +02003609 const int sync = rq_is_sync(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02003610 unsigned long now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611
Jens Axboeb4878f22005-10-20 16:42:29 +02003612 now = jiffies;
Christoph Hellwig33659eb2010-08-07 18:17:56 +02003613 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3614 !!(rq->cmd_flags & REQ_NOIDLE));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615
Aaron Carroll45333d52008-08-26 15:52:36 +02003616 cfq_update_hw_tag(cfqd);
3617
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003618 WARN_ON(!cfqd->rq_in_driver);
Jens Axboe6d048f52007-04-25 12:44:27 +02003619 WARN_ON(!cfqq->dispatched);
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003620 cfqd->rq_in_driver--;
Jens Axboe6d048f52007-04-25 12:44:27 +02003621 cfqq->dispatched--;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003622 (RQ_CFQG(rq))->dispatched--;
Tejun Heo155fead2012-04-01 14:38:44 -07003623 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
3624 rq_io_start_time_ns(rq), rq->cmd_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003626 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
Jens Axboe3ed9a292007-04-23 08:33:33 +02003627
Vivek Goyal365722b2009-10-03 15:21:27 +02003628 if (sync) {
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02003629 struct cfq_rb_root *service_tree;
3630
Shaohua Li383cd722011-07-12 14:24:35 +02003631 RQ_CIC(rq)->ttime.last_end_request = now;
Shaohua Lif5f2b6c2011-07-12 14:24:55 +02003632
3633 if (cfq_cfqq_on_rr(cfqq))
3634 service_tree = cfqq->service_tree;
3635 else
3636 service_tree = service_tree_for(cfqq->cfqg,
3637 cfqq_prio(cfqq), cfqq_type(cfqq));
3638 service_tree->ttime.last_end_request = now;
Corrado Zoccolo573412b2009-12-06 11:48:52 +01003639 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3640 cfqd->last_delayed_sync = now;
Vivek Goyal365722b2009-10-03 15:21:27 +02003641 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02003642
Shaohua Li7700fc42011-07-12 14:24:56 +02003643#ifdef CONFIG_CFQ_GROUP_IOSCHED
3644 cfqq->cfqg->ttime.last_end_request = now;
3645#endif
3646
Jens Axboecaaa5f92006-06-16 11:23:00 +02003647 /*
3648 * If this is the active queue, check if it needs to be expired,
3649 * or if we want to idle in case it has no pending requests.
3650 */
3651 if (cfqd->active_queue == cfqq) {
Jens Axboea36e71f2009-04-15 12:15:11 +02003652 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3653
Jens Axboe44f7c162007-01-19 11:51:58 +11003654 if (cfq_cfqq_slice_new(cfqq)) {
3655 cfq_set_prio_slice(cfqd, cfqq);
3656 cfq_clear_cfqq_slice_new(cfqq);
3657 }
Vivek Goyalf75edf22009-12-03 12:59:53 -05003658
3659 /*
Vivek Goyal7667aa02009-12-08 17:52:58 -05003660		 * Should we wait for the next request to come in before we expire
3661		 * the queue?
Vivek Goyalf75edf22009-12-03 12:59:53 -05003662 */
Vivek Goyal7667aa02009-12-08 17:52:58 -05003663 if (cfq_should_wait_busy(cfqd, cfqq)) {
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02003664 unsigned long extend_sl = cfqd->cfq_slice_idle;
3665 if (!cfqd->cfq_slice_idle)
3666 extend_sl = cfqd->cfq_group_idle;
3667 cfqq->slice_end = jiffies + extend_sl;
Vivek Goyalf75edf22009-12-03 12:59:53 -05003668 cfq_mark_cfqq_wait_busy(cfqq);
Divyesh Shahb1ffe732010-03-25 15:45:03 +01003669 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
Vivek Goyalf75edf22009-12-03 12:59:53 -05003670 }
3671
Jens Axboea36e71f2009-04-15 12:15:11 +02003672 /*
Corrado Zoccolo8e550632009-11-26 10:02:58 +01003673 * Idling is not enabled on:
3674 * - expired queues
3675 * - idle-priority queues
3676 * - async queues
3677 * - queues with still some requests queued
3678 * - when there is a close cooperator
Jens Axboea36e71f2009-04-15 12:15:11 +02003679 */
Jens Axboe08717142008-01-28 11:38:15 +01003680 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
Vivek Goyale5ff0822010-04-26 19:25:11 +02003681 cfq_slice_expired(cfqd, 1);
Corrado Zoccolo8e550632009-11-26 10:02:58 +01003682 else if (sync && cfqq_empty &&
3683 !cfq_close_cooperator(cfqd, cfqq)) {
Corrado Zoccolo749ef9f2010-09-20 15:24:50 +02003684 cfq_arm_slice_timer(cfqd);
Corrado Zoccolo8e550632009-11-26 10:02:58 +01003685 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02003686 }
Jens Axboe6d048f52007-04-25 12:44:27 +02003687
Corrado Zoccolo53c583d2010-02-28 19:45:05 +01003688 if (!cfqd->rq_in_driver)
Jens Axboe23e018a2009-10-05 08:52:35 +02003689 cfq_schedule_dispatch(cfqd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690}
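/*
 * Sketch of the wait-busy flow above: when the final request of a slice
 * completes and cfq_should_wait_busy() agrees, slice_end is pushed out
 * by slice_idle (or by group_idle when slice_idle is 0), so the queue
 * is not expired the moment it goes empty.  If the task submits again
 * inside that window it keeps its share of the disk; otherwise the idle
 * timer expires the queue as usual.
 */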
3691
Jens Axboe89850f72006-07-22 16:48:31 +02003692static inline int __cfq_may_queue(struct cfq_queue *cfqq)
Jens Axboe22e2c502005-06-27 10:55:12 +02003693{
Jens Axboe1b379d82009-08-11 08:26:11 +02003694 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
Jens Axboe3b181522005-06-27 10:56:24 +02003695 cfq_mark_cfqq_must_alloc_slice(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003696 return ELV_MQUEUE_MUST;
Jens Axboe3b181522005-06-27 10:56:24 +02003697 }
Jens Axboe22e2c502005-06-27 10:55:12 +02003698
3699 return ELV_MQUEUE_MAY;
Jens Axboe22e2c502005-06-27 10:55:12 +02003700}
3701
Jens Axboe165125e2007-07-24 09:28:11 +02003702static int cfq_may_queue(struct request_queue *q, int rw)
Jens Axboe22e2c502005-06-27 10:55:12 +02003703{
3704 struct cfq_data *cfqd = q->elevator->elevator_data;
3705 struct task_struct *tsk = current;
Tejun Heoc5869802011-12-14 00:33:41 +01003706 struct cfq_io_cq *cic;
Jens Axboe22e2c502005-06-27 10:55:12 +02003707 struct cfq_queue *cfqq;
3708
3709 /*
3710 * don't force setup of a queue from here, as a call to may_queue
3711 * does not necessarily imply that a request actually will be queued.
3712	 * So just look up a possibly existing queue, or return 'may queue'
3713 * if that fails
3714 */
Jens Axboe4ac845a2008-01-24 08:44:49 +01003715 cic = cfq_cic_lookup(cfqd, tsk->io_context);
Vasily Tarasov91fac312007-04-25 12:29:51 +02003716 if (!cic)
3717 return ELV_MQUEUE_MAY;
3718
Jens Axboeb0b78f82009-04-08 10:56:08 +02003719 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
Jens Axboe22e2c502005-06-27 10:55:12 +02003720 if (cfqq) {
Tejun Heoabede6d2012-03-19 15:10:57 -07003721 cfq_init_prio_data(cfqq, cic);
Jens Axboe22e2c502005-06-27 10:55:12 +02003722
Jens Axboe89850f72006-07-22 16:48:31 +02003723 return __cfq_may_queue(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003724 }
3725
3726 return ELV_MQUEUE_MAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727}
3728
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729/*
3730 * queue lock held here
3731 */
Jens Axboebb37b942006-12-01 10:42:33 +01003732static void cfq_put_request(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733{
Jens Axboe5e705372006-07-13 12:39:25 +02003734 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735
Jens Axboe5e705372006-07-13 12:39:25 +02003736 if (cfqq) {
Jens Axboe22e2c502005-06-27 10:55:12 +02003737 const int rw = rq_data_dir(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738
Jens Axboe22e2c502005-06-27 10:55:12 +02003739 BUG_ON(!cfqq->allocated[rw]);
3740 cfqq->allocated[rw]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02003742 /* Put down rq reference on cfqg */
Tejun Heoeb7d8c072012-03-23 14:02:53 +01003743 cfqg_put(RQ_CFQG(rq));
Tejun Heoa612fdd2011-12-14 00:33:41 +01003744 rq->elv.priv[0] = NULL;
3745 rq->elv.priv[1] = NULL;
Vivek Goyal7f1dc8a2010-04-21 17:44:16 +02003746
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 cfq_put_queue(cfqq);
3748 }
3749}
3750
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003751static struct cfq_queue *
Tejun Heoc5869802011-12-14 00:33:41 +01003752cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003753 struct cfq_queue *cfqq)
3754{
3755 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3756 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
Jeff Moyerb3b6d042009-10-23 17:14:51 -04003757 cfq_mark_cfqq_coop(cfqq->new_cfqq);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003758 cfq_put_queue(cfqq);
3759 return cic_to_cfqq(cic, 1);
3760}
3761
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003762/*
3763 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3764 * was the last process referring to said cfqq.
3765 */
3766static struct cfq_queue *
Tejun Heoc5869802011-12-14 00:33:41 +01003767split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003768{
3769 if (cfqq_process_refs(cfqq) == 1) {
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003770 cfqq->pid = current->pid;
3771 cfq_clear_cfqq_coop(cfqq);
Shaohua Liae54abe2010-02-05 13:11:45 +01003772 cfq_clear_cfqq_split_coop(cfqq);
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003773 return cfqq;
3774 }
3775
3776 cic_set_cfqq(cic, NULL, 1);
Shaohua Lid02a2c02010-05-25 10:16:53 +02003777
3778 cfq_put_cooperator(cfqq);
3779
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003780 cfq_put_queue(cfqq);
3781 return NULL;
3782}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783/*
Jens Axboe22e2c502005-06-27 10:55:12 +02003784 * Allocate cfq data structures associated with this request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 */
Jens Axboe22e2c502005-06-27 10:55:12 +02003786static int
Tejun Heo852c7882012-03-05 13:15:27 -08003787cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
3788 gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789{
3790 struct cfq_data *cfqd = q->elevator->elevator_data;
Tejun Heof1f8cc92011-12-14 00:33:42 +01003791 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792 const int rw = rq_data_dir(rq);
Jens Axboea6151c32009-10-07 20:02:57 +02003793 const bool is_sync = rq_is_sync(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003794 struct cfq_queue *cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795
3796 might_sleep_if(gfp_mask & __GFP_WAIT);
3797
Tejun Heo216284c2011-12-14 00:33:38 +01003798 spin_lock_irq(q->queue_lock);
Tejun Heof1f8cc92011-12-14 00:33:42 +01003799
Tejun Heo598971b2012-03-19 15:10:58 -07003800 check_ioprio_changed(cic, bio);
3801 check_blkcg_changed(cic, bio);
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003802new_queue:
Vasily Tarasov91fac312007-04-25 12:29:51 +02003803 cfqq = cic_to_cfqq(cic, is_sync);
Vivek Goyal32f2e802009-07-09 22:13:16 +02003804 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
Tejun Heoabede6d2012-03-19 15:10:57 -07003805 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
Vasily Tarasov91fac312007-04-25 12:29:51 +02003806 cic_set_cfqq(cic, cfqq, is_sync);
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003807 } else {
3808 /*
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003809 * If the queue was seeky for too long, break it apart.
3810 */
Shaohua Liae54abe2010-02-05 13:11:45 +01003811 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
Jeff Moyere6c5bc72009-10-23 17:14:52 -04003812 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3813 cfqq = split_cfqq(cic, cfqq);
3814 if (!cfqq)
3815 goto new_queue;
3816 }
3817
3818 /*
Jeff Moyerdf5fe3e2009-10-23 17:14:50 -04003819 * Check to see if this queue is scheduled to merge with
3820 * another, closely cooperating queue. The merging of
3821 * queues happens here as it must be done in process context.
3822 * The reference on new_cfqq was taken in merge_cfqqs.
3823 */
3824 if (cfqq->new_cfqq)
3825 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
Vasily Tarasov91fac312007-04-25 12:29:51 +02003826 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827
3828 cfqq->allocated[rw]++;
Jens Axboe5e705372006-07-13 12:39:25 +02003829
Jens Axboe6fae9c22011-03-01 15:04:39 -05003830 cfqq->ref++;
Tejun Heoeb7d8c072012-03-23 14:02:53 +01003831 cfqg_get(cfqq->cfqg);
Tejun Heoa612fdd2011-12-14 00:33:41 +01003832 rq->elv.priv[0] = cfqq;
Tejun Heo1adaf3d2012-03-05 13:15:15 -08003833 rq->elv.priv[1] = cfqq->cfqg;
Tejun Heo216284c2011-12-14 00:33:38 +01003834 spin_unlock_irq(q->queue_lock);
Jens Axboe5e705372006-07-13 12:39:25 +02003835 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836}
3837
David Howells65f27f32006-11-22 14:55:48 +00003838static void cfq_kick_queue(struct work_struct *work)
Jens Axboe22e2c502005-06-27 10:55:12 +02003839{
David Howells65f27f32006-11-22 14:55:48 +00003840 struct cfq_data *cfqd =
Jens Axboe23e018a2009-10-05 08:52:35 +02003841 container_of(work, struct cfq_data, unplug_work);
Jens Axboe165125e2007-07-24 09:28:11 +02003842 struct request_queue *q = cfqd->queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02003843
Jens Axboe40bb54d2009-04-15 12:11:10 +02003844 spin_lock_irq(q->queue_lock);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02003845 __blk_run_queue(cfqd->queue);
Jens Axboe40bb54d2009-04-15 12:11:10 +02003846 spin_unlock_irq(q->queue_lock);
Jens Axboe22e2c502005-06-27 10:55:12 +02003847}
3848
3849/*
3850 * Timer running if the active_queue is currently idling inside its time slice
3851 */
3852static void cfq_idle_slice_timer(unsigned long data)
3853{
3854 struct cfq_data *cfqd = (struct cfq_data *) data;
3855 struct cfq_queue *cfqq;
3856 unsigned long flags;
Jens Axboe3c6bd2f2007-01-19 12:06:33 +11003857 int timed_out = 1;
Jens Axboe22e2c502005-06-27 10:55:12 +02003858
Jens Axboe7b679132008-05-30 12:23:07 +02003859 cfq_log(cfqd, "idle timer fired");
3860
Jens Axboe22e2c502005-06-27 10:55:12 +02003861 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3862
Jens Axboefe094d92008-01-31 13:08:54 +01003863 cfqq = cfqd->active_queue;
3864 if (cfqq) {
Jens Axboe3c6bd2f2007-01-19 12:06:33 +11003865 timed_out = 0;
3866
Jens Axboe22e2c502005-06-27 10:55:12 +02003867 /*
Jens Axboeb0291952009-04-07 11:38:31 +02003868 * We saw a request before the queue expired, let it through
3869 */
3870 if (cfq_cfqq_must_dispatch(cfqq))
3871 goto out_kick;
3872
3873 /*
Jens Axboe22e2c502005-06-27 10:55:12 +02003874 * expired
3875 */
Jens Axboe44f7c162007-01-19 11:51:58 +11003876 if (cfq_slice_used(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02003877 goto expire;
3878
3879 /*
3880		 * only expire and reinvoke the request handler if there are
3881 * other queues with pending requests
3882 */
Jens Axboecaaa5f92006-06-16 11:23:00 +02003883 if (!cfqd->busy_queues)
Jens Axboe22e2c502005-06-27 10:55:12 +02003884 goto out_cont;
Jens Axboe22e2c502005-06-27 10:55:12 +02003885
3886 /*
3887 * not expired and it has a request pending, let it dispatch
3888 */
Jens Axboe75e50982009-04-07 08:56:14 +02003889 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
Jens Axboe22e2c502005-06-27 10:55:12 +02003890 goto out_kick;
Corrado Zoccolo76280af2009-11-26 10:02:58 +01003891
3892 /*
3893		 * The queue-depth flag is reset only when idling didn't succeed
3894 */
3895 cfq_clear_cfqq_deep(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02003896 }
3897expire:
Vivek Goyale5ff0822010-04-26 19:25:11 +02003898 cfq_slice_expired(cfqd, timed_out);
Jens Axboe22e2c502005-06-27 10:55:12 +02003899out_kick:
Jens Axboe23e018a2009-10-05 08:52:35 +02003900 cfq_schedule_dispatch(cfqd);
Jens Axboe22e2c502005-06-27 10:55:12 +02003901out_cont:
3902 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3903}
3904
Jens Axboe3b181522005-06-27 10:56:24 +02003905static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3906{
3907 del_timer_sync(&cfqd->idle_slice_timer);
Jens Axboe23e018a2009-10-05 08:52:35 +02003908 cancel_work_sync(&cfqd->unplug_work);
Jens Axboe3b181522005-06-27 10:56:24 +02003909}
Jens Axboe22e2c502005-06-27 10:55:12 +02003910
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003911static void cfq_put_async_queues(struct cfq_data *cfqd)
3912{
3913 int i;
3914
3915 for (i = 0; i < IOPRIO_BE_NR; i++) {
3916 if (cfqd->async_cfqq[0][i])
3917 cfq_put_queue(cfqd->async_cfqq[0][i]);
3918 if (cfqd->async_cfqq[1][i])
3919 cfq_put_queue(cfqd->async_cfqq[1][i]);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003920 }
Oleg Nesterov2389d1e2007-11-05 08:58:05 +01003921
3922 if (cfqd->async_idle_cfqq)
3923 cfq_put_queue(cfqd->async_idle_cfqq);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003924}
3925
Jens Axboeb374d182008-10-31 10:05:07 +01003926static void cfq_exit_queue(struct elevator_queue *e)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927{
Jens Axboe22e2c502005-06-27 10:55:12 +02003928 struct cfq_data *cfqd = e->elevator_data;
Jens Axboe165125e2007-07-24 09:28:11 +02003929 struct request_queue *q = cfqd->queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02003930
Jens Axboe3b181522005-06-27 10:56:24 +02003931 cfq_shutdown_timer_wq(cfqd);
Jens Axboee2d74ac2006-03-28 08:59:01 +02003932
Al Virod9ff4182006-03-18 13:51:22 -05003933 spin_lock_irq(q->queue_lock);
Jens Axboee2d74ac2006-03-28 08:59:01 +02003934
Al Virod9ff4182006-03-18 13:51:22 -05003935 if (cfqd->active_queue)
Vivek Goyale5ff0822010-04-26 19:25:11 +02003936 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
Jens Axboee2d74ac2006-03-28 08:59:01 +02003937
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02003938 cfq_put_async_queues(cfqd);
Tejun Heo03aa2642012-03-05 13:15:19 -08003939
3940 spin_unlock_irq(q->queue_lock);
3941
Al Viroa90d7422006-03-18 12:05:37 -05003942 cfq_shutdown_timer_wq(cfqd);
3943
Tejun Heof51b8022012-03-05 13:15:05 -08003944#ifndef CONFIG_CFQ_GROUP_IOSCHED
3945 kfree(cfqd->root_group);
Vivek Goyal2abae552011-05-23 10:02:19 +02003946#endif
Tejun Heo3c798392012-04-16 13:57:25 -07003947 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
Vivek Goyal56edf7d2011-05-19 15:38:22 -04003948 kfree(cfqd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949}
3950
Tejun Heob2fab5a2012-03-05 13:14:57 -08003951static int cfq_init_queue(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952{
3953 struct cfq_data *cfqd;
Tejun Heo3c798392012-04-16 13:57:25 -07003954 struct blkcg_gq *blkg __maybe_unused;
Tejun Heoa2b16932012-04-13 13:11:33 -07003955 int i, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
Christoph Lameter94f60302007-07-17 04:03:29 -07003957 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
Tejun Heoa73f7302011-12-14 00:33:37 +01003958 if (!cfqd)
Tejun Heob2fab5a2012-03-05 13:14:57 -08003959 return -ENOMEM;
Konstantin Khlebnikov80b15c72010-05-20 23:21:41 +04003960
Tejun Heof51b8022012-03-05 13:15:05 -08003961 cfqd->queue = q;
3962 q->elevator->elevator_data = cfqd;
3963
Vivek Goyal1fa8f6d2009-12-03 12:59:41 -05003964 /* Init root service tree */
3965 cfqd->grp_service_tree = CFQ_RB_ROOT;
3966
Tejun Heof51b8022012-03-05 13:15:05 -08003967 /* Init root group and prefer root group over other groups by default */
Vivek Goyal25fb5162009-12-03 12:59:46 -05003968#ifdef CONFIG_CFQ_GROUP_IOSCHED
Tejun Heo3c798392012-04-16 13:57:25 -07003969 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
Tejun Heoa2b16932012-04-13 13:11:33 -07003970 if (ret)
3971 goto out_free;
Vivek Goyal5624a4e2011-05-19 15:38:28 -04003972
Tejun Heoa2b16932012-04-13 13:11:33 -07003973 cfqd->root_group = blkg_to_cfqg(q->root_blkg);
Tejun Heof51b8022012-03-05 13:15:05 -08003974#else
Tejun Heoa2b16932012-04-13 13:11:33 -07003975 ret = -ENOMEM;
Tejun Heof51b8022012-03-05 13:15:05 -08003976 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3977 GFP_KERNEL, cfqd->queue->node);
Tejun Heoa2b16932012-04-13 13:11:33 -07003978 if (!cfqd->root_group)
3979 goto out_free;
Vivek Goyal5624a4e2011-05-19 15:38:28 -04003980
Tejun Heoa2b16932012-04-13 13:11:33 -07003981 cfq_init_cfqg_base(cfqd->root_group);
3982#endif
Tejun Heo3381cb82012-04-01 14:38:44 -07003983 cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
Vivek Goyal5624a4e2011-05-19 15:38:28 -04003984
Jens Axboe26a2ac02009-04-23 12:13:27 +02003985 /*
3986 * Not strictly needed (since RB_ROOT just clears the node and we
3987 * zeroed cfqd on alloc), but better be safe in case someone decides
3988 * to add magic to the rb code
3989 */
3990 for (i = 0; i < CFQ_PRIO_LISTS; i++)
3991 cfqd->prio_trees[i] = RB_ROOT;
3992
Jens Axboe6118b702009-06-30 09:34:12 +02003993 /*
3994 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3995 * Grab a permanent reference to it, so that the normal code flow
Tejun Heof51b8022012-03-05 13:15:05 -08003996 * will not attempt to free it. oom_cfqq is linked to root_group
3997 * but shouldn't hold a reference as it'll never be unlinked. Lose
3998 * the reference from linking right away.
Jens Axboe6118b702009-06-30 09:34:12 +02003999 */
4000 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
Shaohua Li30d7b942011-01-07 08:46:59 +01004001 cfqd->oom_cfqq.ref++;
Tejun Heo1adaf3d2012-03-05 13:15:15 -08004002
4003 spin_lock_irq(q->queue_lock);
Tejun Heof51b8022012-03-05 13:15:05 -08004004 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
Tejun Heoeb7d8c072012-03-23 14:02:53 +01004005 cfqg_put(cfqd->root_group);
Tejun Heo1adaf3d2012-03-05 13:15:15 -08004006 spin_unlock_irq(q->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007
Jens Axboe22e2c502005-06-27 10:55:12 +02004008 init_timer(&cfqd->idle_slice_timer);
4009 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4010 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4011
Jens Axboe23e018a2009-10-05 08:52:35 +02004012 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
Jens Axboe22e2c502005-06-27 10:55:12 +02004013
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 cfqd->cfq_quantum = cfq_quantum;
Jens Axboe22e2c502005-06-27 10:55:12 +02004015 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4016 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017 cfqd->cfq_back_max = cfq_back_max;
4018 cfqd->cfq_back_penalty = cfq_back_penalty;
Jens Axboe22e2c502005-06-27 10:55:12 +02004019 cfqd->cfq_slice[0] = cfq_slice_async;
4020 cfqd->cfq_slice[1] = cfq_slice_sync;
4021 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4022 cfqd->cfq_slice_idle = cfq_slice_idle;
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004023 cfqd->cfq_group_idle = cfq_group_idle;
Jens Axboe963b72f2009-10-03 19:42:18 +02004024 cfqd->cfq_latency = 1;
Corrado Zoccoloe459dd02009-11-26 10:02:57 +01004025 cfqd->hw_tag = -1;
Corrado Zoccoloedc71132009-12-09 20:56:04 +01004026 /*
4027	 * we optimistically start by assuming sync ops weren't delayed in the
4028	 * last second, in order to allow a larger depth for async operations.
4029 */
Corrado Zoccolo573412b2009-12-06 11:48:52 +01004030 cfqd->last_delayed_sync = jiffies - HZ;
Tejun Heob2fab5a2012-03-05 13:14:57 -08004031 return 0;
Tejun Heoa2b16932012-04-13 13:11:33 -07004032
4033out_free:
4034 kfree(cfqd);
4035 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036}
4037
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038/*
4039 * sysfs parts below -->
4040 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041static ssize_t
4042cfq_var_show(unsigned int var, char *page)
4043{
4044 return sprintf(page, "%d\n", var);
4045}
4046
4047static ssize_t
4048cfq_var_store(unsigned int *var, const char *page, size_t count)
4049{
4050 char *p = (char *) page;
4051
4052 *var = simple_strtoul(p, &p, 10);
4053 return count;
4054}
4055
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
Jens Axboeb374d182008-10-31 10:05:07 +01004057static ssize_t __FUNC(struct elevator_queue *e, char *page) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058{ \
Al Viro3d1ab402006-03-18 18:35:43 -05004059 struct cfq_data *cfqd = e->elevator_data; \
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 unsigned int __data = __VAR; \
4061 if (__CONV) \
4062 __data = jiffies_to_msecs(__data); \
4063 return cfq_var_show(__data, (page)); \
4064}
4065SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
Jens Axboe22e2c502005-06-27 10:55:12 +02004066SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4067SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
Al Viroe572ec72006-03-18 22:27:18 -05004068SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4069SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
Jens Axboe22e2c502005-06-27 10:55:12 +02004070SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004071SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
Jens Axboe22e2c502005-06-27 10:55:12 +02004072SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4073SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4074SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
Jens Axboe963b72f2009-10-03 19:42:18 +02004075SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076#undef SHOW_FUNCTION
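/*
 * For reference, a sketch of what one SHOW_FUNCTION instance expands to
 * (illustrative only; names follow the macro arguments above):
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *		__data = jiffies_to_msecs(__data);	(since __CONV is 1)
 *		return cfq_var_show(__data, (page));
 *	}
 */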
4077
4078#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
Jens Axboeb374d182008-10-31 10:05:07 +01004079static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080{ \
Al Viro3d1ab402006-03-18 18:35:43 -05004081 struct cfq_data *cfqd = e->elevator_data; \
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082 unsigned int __data; \
4083 int ret = cfq_var_store(&__data, (page), count); \
4084 if (__data < (MIN)) \
4085 __data = (MIN); \
4086 else if (__data > (MAX)) \
4087 __data = (MAX); \
4088 if (__CONV) \
4089 *(__PTR) = msecs_to_jiffies(__data); \
4090 else \
4091 *(__PTR) = __data; \
4092 return ret; \
4093}
4094STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
Jens Axboefe094d92008-01-31 13:08:54 +01004095STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4096 UINT_MAX, 1);
4097STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4098 UINT_MAX, 1);
Al Viroe572ec72006-03-18 22:27:18 -05004099STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
Jens Axboefe094d92008-01-31 13:08:54 +01004100STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4101 UINT_MAX, 0);
Jens Axboe22e2c502005-06-27 10:55:12 +02004102STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004103STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
Jens Axboe22e2c502005-06-27 10:55:12 +02004104STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4105STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
Jens Axboefe094d92008-01-31 13:08:54 +01004106STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4107 UINT_MAX, 0);
Jens Axboe963b72f2009-10-03 19:42:18 +02004108STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109#undef STORE_FUNCTION
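/*
 * Likewise, a sketch of one expanded STORE_FUNCTION instance
 * (illustrative only; __CONV is 0 here, so no unit conversion):
 *
 *	static ssize_t cfq_quantum_store(struct elevator_queue *e,
 *					 const char *page, size_t count)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data;
 *		int ret = cfq_var_store(&__data, (page), count);
 *		if (__data < 1)
 *			__data = 1;
 *		else if (__data > UINT_MAX)
 *			__data = UINT_MAX;
 *		cfqd->cfq_quantum = __data;
 *		return ret;
 *	}
 */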
4110
Al Viroe572ec72006-03-18 22:27:18 -05004111#define CFQ_ATTR(name) \
4112 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
Jens Axboe3b181522005-06-27 10:56:24 +02004113
Al Viroe572ec72006-03-18 22:27:18 -05004114static struct elv_fs_entry cfq_attrs[] = {
4115 CFQ_ATTR(quantum),
Al Viroe572ec72006-03-18 22:27:18 -05004116 CFQ_ATTR(fifo_expire_sync),
4117 CFQ_ATTR(fifo_expire_async),
4118 CFQ_ATTR(back_seek_max),
4119 CFQ_ATTR(back_seek_penalty),
4120 CFQ_ATTR(slice_sync),
4121 CFQ_ATTR(slice_async),
4122 CFQ_ATTR(slice_async_rq),
4123 CFQ_ATTR(slice_idle),
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004124 CFQ_ATTR(group_idle),
Jens Axboe963b72f2009-10-03 19:42:18 +02004125 CFQ_ATTR(low_latency),
Al Viroe572ec72006-03-18 22:27:18 -05004126 __ATTR_NULL
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127};
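/*
 * Usage sketch (the device name is assumed): with cfq active, these
 * attributes appear as files under /sys/block/<dev>/queue/iosched/, e.g.
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * with the generated show/store handlers doing the msecs<->jiffies
 * conversion and range clamping.
 */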
4128
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129static struct elevator_type iosched_cfq = {
4130 .ops = {
4131 .elevator_merge_fn = cfq_merge,
4132 .elevator_merged_fn = cfq_merged_request,
4133 .elevator_merge_req_fn = cfq_merged_requests,
Jens Axboeda775262006-12-20 11:04:12 +01004134 .elevator_allow_merge_fn = cfq_allow_merge,
Divyesh Shah812d4022010-04-08 21:14:23 -07004135 .elevator_bio_merged_fn = cfq_bio_merged,
Jens Axboeb4878f22005-10-20 16:42:29 +02004136 .elevator_dispatch_fn = cfq_dispatch_requests,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137 .elevator_add_req_fn = cfq_insert_request,
Jens Axboeb4878f22005-10-20 16:42:29 +02004138 .elevator_activate_req_fn = cfq_activate_request,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139 .elevator_deactivate_req_fn = cfq_deactivate_request,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140 .elevator_completed_req_fn = cfq_completed_request,
Jens Axboe21183b02006-07-13 12:33:14 +02004141 .elevator_former_req_fn = elv_rb_former_request,
4142 .elevator_latter_req_fn = elv_rb_latter_request,
Tejun Heo9b84cac2011-12-14 00:33:42 +01004143 .elevator_init_icq_fn = cfq_init_icq,
Tejun Heo7e5a8792011-12-14 00:33:42 +01004144 .elevator_exit_icq_fn = cfq_exit_icq,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 .elevator_set_req_fn = cfq_set_request,
4146 .elevator_put_req_fn = cfq_put_request,
4147 .elevator_may_queue_fn = cfq_may_queue,
4148 .elevator_init_fn = cfq_init_queue,
4149 .elevator_exit_fn = cfq_exit_queue,
4150 },
Tejun Heo3d3c2372011-12-14 00:33:42 +01004151 .icq_size = sizeof(struct cfq_io_cq),
4152 .icq_align = __alignof__(struct cfq_io_cq),
Al Viro3d1ab402006-03-18 18:35:43 -05004153 .elevator_attrs = cfq_attrs,
Tejun Heo3d3c2372011-12-14 00:33:42 +01004154 .elevator_name = "cfq",
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155 .elevator_owner = THIS_MODULE,
4156};
4157
Vivek Goyal3e252062009-12-04 10:36:42 -05004158#ifdef CONFIG_CFQ_GROUP_IOSCHED
Tejun Heo3c798392012-04-16 13:57:25 -07004159static struct blkcg_policy blkcg_policy_cfq = {
Vivek Goyal3e252062009-12-04 10:36:42 -05004160 .ops = {
Tejun Heo3c798392012-04-16 13:57:25 -07004161 .pd_init_fn = cfq_pd_init,
4162 .pd_reset_stats_fn = cfq_pd_reset_stats,
Vivek Goyal3e252062009-12-04 10:36:42 -05004163 },
Tejun Heo03814112012-03-05 13:15:14 -08004164 .pdata_size = sizeof(struct cfq_group),
Tejun Heo60c2bc22012-04-01 14:38:43 -07004165 .cftypes = cfq_blkcg_files,
Vivek Goyal3e252062009-12-04 10:36:42 -05004166};
Vivek Goyal3e252062009-12-04 10:36:42 -05004167#endif
4168
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169static int __init cfq_init(void)
4170{
Tejun Heo3d3c2372011-12-14 00:33:42 +01004171 int ret;
4172
Jens Axboe22e2c502005-06-27 10:55:12 +02004173 /*
4174 * could be 0 on HZ < 1000 setups
4175 */
4176 if (!cfq_slice_async)
4177 cfq_slice_async = 1;
4178 if (!cfq_slice_idle)
4179 cfq_slice_idle = 1;
4180
Vivek Goyal80bdf0c2010-08-23 12:24:26 +02004181#ifdef CONFIG_CFQ_GROUP_IOSCHED
4182 if (!cfq_group_idle)
4183 cfq_group_idle = 1;
4184#else
4185 cfq_group_idle = 0;
4186#endif
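	/*
	 * Worked example of the checks above (sketch): the idle and slice
	 * defaults are derived from HZ by integer division, so on a low-HZ
	 * build (e.g. HZ=100) they can round down to 0 jiffies, which would
	 * disable idling entirely; the fallbacks bump such values to 1 jiffy.
	 */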
Tejun Heo8bd435b2012-04-13 13:11:28 -07004187
Tejun Heo3c798392012-04-16 13:57:25 -07004188 ret = blkcg_policy_register(&blkcg_policy_cfq);
Tejun Heo8bd435b2012-04-13 13:11:28 -07004189 if (ret)
4190 return ret;
4191
Tejun Heo3d3c2372011-12-14 00:33:42 +01004192 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4193 if (!cfq_pool)
Tejun Heo8bd435b2012-04-13 13:11:28 -07004194 goto err_pol_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195
Tejun Heo3d3c2372011-12-14 00:33:42 +01004196 ret = elv_register(&iosched_cfq);
Tejun Heo8bd435b2012-04-13 13:11:28 -07004197 if (ret)
4198 goto err_free_pool;
Tejun Heo3d3c2372011-12-14 00:33:42 +01004199
Adrian Bunk2fdd82b2007-12-12 18:51:56 +01004200 return 0;
Tejun Heo8bd435b2012-04-13 13:11:28 -07004201
4202err_free_pool:
4203 kmem_cache_destroy(cfq_pool);
4204err_pol_unreg:
Tejun Heo3c798392012-04-16 13:57:25 -07004205 blkcg_policy_unregister(&blkcg_policy_cfq);
Tejun Heo8bd435b2012-04-13 13:11:28 -07004206 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207}
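/*
 * Usage sketch (device name assumed): once registered, cfq can be
 * selected per queue at runtime or system-wide at boot:
 *
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *
 * or with the "elevator=cfq" kernel command-line parameter.
 */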
4208
4209static void __exit cfq_exit(void)
4210{
Tejun Heo3c798392012-04-16 13:57:25 -07004211 blkcg_policy_unregister(&blkcg_policy_cfq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 elv_unregister(&iosched_cfq);
Tejun Heo3d3c2372011-12-14 00:33:42 +01004213 kmem_cache_destroy(cfq_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214}
4215
4216module_init(cfq_init);
4217module_exit(cfq_exit);
4218
4219MODULE_AUTHOR("Jens Axboe");
4220MODULE_LICENSE("GPL");
4221MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");