/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;
	/*
	 * Used to track any pending rt requests so we can pre-empt current
	 * non-RT cfqq in service when this value is non-zero.
	 */
	unsigned int busy_rt_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;
	unsigned int slice_dispatch;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
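
/*
 * Note: each CFQ_CFQQ_FNS(name) expansion above generates three small
 * helpers, cfq_mark_cfqq_<name>(), cfq_clear_cfqq_<name>() and
 * cfq_cfqq_<name>(), which set, clear and test the matching
 * CFQ_CFQQ_FLAG_<name> bit in cfqq->flags. For example, CFQ_CFQQ_FNS(on_rr)
 * yields cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(),
 * which are used throughout the code below.
 */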

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
					     struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
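
/*
 * Worked example (a sketch assuming the default tunables above and HZ=100):
 * the sync base slice is HZ/10 = 10 jiffies, so base_slice/CFQ_SLICE_SCALE
 * is 2 jiffies per priority step. An ioprio 4 (default) sync queue keeps the
 * 10 jiffy (~100ms) base slice, ioprio 0 gets 10 + 2*4 = 18 jiffies (~180ms)
 * and ioprio 7 gets 10 + 2*(4-7) = 4 jiffies (~40ms).
 */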

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				    struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues goes to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		del_timer(&cfqd->idle_slice_timer);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * still requests with the driver, don't idle
	 */
	if (cfqd->rq_in_driver)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_log(cfqd, "arm_idle: %lu", sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
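
/*
 * Worked example (a sketch assuming the default cfq_slice_async_rq = 2 and
 * CFQ_PRIO_LISTS = IOPRIO_BE_NR = 8): the formula reduces to
 * 2 * base_rq * (8 - ioprio), so an ioprio 0 queue may dispatch up to 32
 * requests in one slice, ioprio 4 up to 16 and ioprio 7 up to 4.
 */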

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
		goto expire;

	/*
	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
	 * cfqq.
	 */
	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
		/*
		 * We simulate this as cfqq timed out so that it gets to bank
		 * the remaining of its time slice.
		 */
		cfq_log_cfqq(cfqd, cfqq, "preempt");
		cfq_slice_expired(cfqd, 1);
		goto new_queue;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
	return dispatched;
}

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_context *cic = RQ_CIC(rq);

		atomic_inc(&cic->ioc->refcount);
		cfqd->active_cic = cic;
	}
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	unsigned int max_dispatch;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
		return 0;

	max_dispatch = cfqd->cfq_quantum;
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return 0;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1)
			return 0;

		/*
		 * we are the only queue, allow up to 4 times of 'quantum'
		 */
		if (cfqq->dispatched >= 4 * max_dispatch)
			return 0;
	}

	/*
	 * Dispatch a request from this cfqq
	 */
	cfq_dispatch_request(cfqd, cfqq);
	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log(cfqd, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
		    void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	rcu_read_lock();
	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1292 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1293 * and ->trim() which is called with the task lock held
1294 */
Jens Axboee2d74ac2006-03-28 08:59:01 +02001295static void cfq_free_io_context(struct io_context *ioc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296{
Jens Axboe4ac845a2008-01-24 08:44:49 +01001297 /*
Fabio Checconi34e6bbf2008-04-02 14:31:02 +02001298 * ioc->refcount is zero here, or we are called from elv_unregister(),
1299 * so no more cic's are allowed to be linked into this ioc. So it
1300 * should be ok to iterate over the known list, we will see all cic's
1301 * since no new ones are added.
Jens Axboe4ac845a2008-01-24 08:44:49 +01001302 */
Jens Axboe07416d22008-05-07 09:17:12 +02001303 __call_for_each_cic(ioc, cic_free_func);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304}
1305
Jens Axboe89850f72006-07-22 16:48:31 +02001306static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1307{
Jens Axboe28f95cbc2007-01-19 12:09:53 +11001308 if (unlikely(cfqq == cfqd->active_queue)) {
Jens Axboe6084cdd2007-04-23 08:25:00 +02001309 __cfq_slice_expired(cfqd, cfqq, 0);
Jens Axboe28f95cbc2007-01-19 12:09:53 +11001310 cfq_schedule_dispatch(cfqd);
1311 }
Jens Axboe89850f72006-07-22 16:48:31 +02001312
1313 cfq_put_queue(cfqq);
1314}
1315
1316static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1317 struct cfq_io_context *cic)
1318{
Fabio Checconi4faa3c82008-04-10 08:28:01 +02001319 struct io_context *ioc = cic->ioc;
1320
Jens Axboefc463792006-08-29 09:05:44 +02001321 list_del_init(&cic->queue_list);
Jens Axboe4ac845a2008-01-24 08:44:49 +01001322
1323 /*
1324 * Make sure key == NULL is seen for dead queues
1325 */
Jens Axboefc463792006-08-29 09:05:44 +02001326 smp_wmb();
Jens Axboe4ac845a2008-01-24 08:44:49 +01001327 cic->dead_key = (unsigned long) cic->key;
Jens Axboefc463792006-08-29 09:05:44 +02001328 cic->key = NULL;
1329
Fabio Checconi4faa3c82008-04-10 08:28:01 +02001330 if (ioc->ioc_data == cic)
1331 rcu_assign_pointer(ioc->ioc_data, NULL);
1332
Jens Axboeff6657c2009-04-08 10:58:57 +02001333 if (cic->cfqq[BLK_RW_ASYNC]) {
1334 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1335 cic->cfqq[BLK_RW_ASYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02001336 }
1337
Jens Axboeff6657c2009-04-08 10:58:57 +02001338 if (cic->cfqq[BLK_RW_SYNC]) {
1339 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1340 cic->cfqq[BLK_RW_SYNC] = NULL;
Jens Axboe89850f72006-07-22 16:48:31 +02001341 }
Jens Axboe89850f72006-07-22 16:48:31 +02001342}
1343
Jens Axboe4ac845a2008-01-24 08:44:49 +01001344static void cfq_exit_single_io_context(struct io_context *ioc,
1345 struct cfq_io_context *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02001346{
Al Viro478a82b2006-03-18 13:25:24 -05001347 struct cfq_data *cfqd = cic->key;
Jens Axboe22e2c502005-06-27 10:55:12 +02001348
Jens Axboe89850f72006-07-22 16:48:31 +02001349 if (cfqd) {
Jens Axboe165125e2007-07-24 09:28:11 +02001350 struct request_queue *q = cfqd->queue;
Jens Axboe4ac845a2008-01-24 08:44:49 +01001351 unsigned long flags;
Jens Axboe22e2c502005-06-27 10:55:12 +02001352
Jens Axboe4ac845a2008-01-24 08:44:49 +01001353 spin_lock_irqsave(q->queue_lock, flags);
Jens Axboe62c1fe92008-12-15 21:19:25 +01001354
1355 /*
1356 * Ensure we get a fresh copy of the ->key to prevent
1357 * race between exiting task and queue
1358 */
1359 smp_read_barrier_depends();
1360 if (cic->key)
1361 __cfq_exit_single_io_context(cfqd, cic);
1362
Jens Axboe4ac845a2008-01-24 08:44:49 +01001363 spin_unlock_irqrestore(q->queue_lock, flags);
Al Viro12a05732006-03-18 13:38:01 -05001364 }
Jens Axboe22e2c502005-06-27 10:55:12 +02001365}
1366
Jens Axboe498d3aa22007-04-26 12:54:48 +02001367/*
1368 * The process that ioc belongs to has exited, we need to clean up
 1369 * and put the internal structures we have that belong to that process.
1370 */
Jens Axboee2d74ac2006-03-28 08:59:01 +02001371static void cfq_exit_io_context(struct io_context *ioc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372{
Jens Axboe4ac845a2008-01-24 08:44:49 +01001373 call_for_each_cic(ioc, cfq_exit_single_io_context);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374}
1375
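/*
 * Allocate a new cfq_io_context from the dedicated slab, on the queue's
 * node. Initializes the list linkage and the dtor/exit hooks and bumps
 * the global ioc_count. Returns NULL if the allocation fails.
 */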
Jens Axboe22e2c502005-06-27 10:55:12 +02001376static struct cfq_io_context *
Al Viro8267e262005-10-21 03:20:53 -04001377cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378{
Jens Axboeb5deef92006-07-19 23:39:40 +02001379 struct cfq_io_context *cic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Christoph Lameter94f60302007-07-17 04:03:29 -07001381 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1382 cfqd->queue->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 if (cic) {
Jens Axboe22e2c502005-06-27 10:55:12 +02001384 cic->last_end_request = jiffies;
Jens Axboe553698f2006-06-14 19:11:57 +02001385 INIT_LIST_HEAD(&cic->queue_list);
Jens Axboeffc4e752008-02-19 10:02:29 +01001386 INIT_HLIST_NODE(&cic->cic_list);
Jens Axboe22e2c502005-06-27 10:55:12 +02001387 cic->dtor = cfq_free_io_context;
1388 cic->exit = cfq_exit_io_context;
Jens Axboe4050cf12006-07-19 05:07:12 +02001389 elv_ioc_count_inc(ioc_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 }
1391
1392 return cic;
1393}
1394
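/*
 * Derive cfqq->ioprio and ->ioprio_class from the io_context. With no
 * class set we fall back to the task's CPU scheduling nice value; the
 * idle class additionally has its idle window disabled. The original
 * values are saved so cfq_prio_boost() can restore them later.
 */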
Jens Axboefd0928d2008-01-24 08:52:45 +01001395static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
Jens Axboe22e2c502005-06-27 10:55:12 +02001396{
1397 struct task_struct *tsk = current;
1398 int ioprio_class;
1399
Jens Axboe3b181522005-06-27 10:56:24 +02001400 if (!cfq_cfqq_prio_changed(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02001401 return;
1402
Jens Axboefd0928d2008-01-24 08:52:45 +01001403 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
Jens Axboe22e2c502005-06-27 10:55:12 +02001404 switch (ioprio_class) {
Jens Axboefe094d92008-01-31 13:08:54 +01001405 default:
1406 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1407 case IOPRIO_CLASS_NONE:
1408 /*
Jens Axboe6d63c272008-05-07 09:51:23 +02001409 * no prio set, inherit CPU scheduling settings
Jens Axboefe094d92008-01-31 13:08:54 +01001410 */
1411 cfqq->ioprio = task_nice_ioprio(tsk);
Jens Axboe6d63c272008-05-07 09:51:23 +02001412 cfqq->ioprio_class = task_nice_ioclass(tsk);
Jens Axboefe094d92008-01-31 13:08:54 +01001413 break;
1414 case IOPRIO_CLASS_RT:
1415 cfqq->ioprio = task_ioprio(ioc);
1416 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1417 break;
1418 case IOPRIO_CLASS_BE:
1419 cfqq->ioprio = task_ioprio(ioc);
1420 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1421 break;
1422 case IOPRIO_CLASS_IDLE:
1423 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1424 cfqq->ioprio = 7;
1425 cfq_clear_cfqq_idle_window(cfqq);
1426 break;
Jens Axboe22e2c502005-06-27 10:55:12 +02001427 }
1428
1429 /*
1430 * keep track of original prio settings in case we have to temporarily
1431 * elevate the priority of this queue
1432 */
1433 cfqq->org_ioprio = cfqq->ioprio;
1434 cfqq->org_ioprio_class = cfqq->ioprio_class;
Jens Axboe3b181522005-06-27 10:56:24 +02001435 cfq_clear_cfqq_prio_changed(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02001436}
1437
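/*
 * Per-cic callback run when the owning task's ioprio has changed: the
 * async queue is swapped for one allocated at the new priority, while
 * the sync queue is only marked so its prio data gets refreshed on the
 * next request.
 */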
Jens Axboefebffd62008-01-28 13:19:43 +01001438static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
Jens Axboe22e2c502005-06-27 10:55:12 +02001439{
Al Viro478a82b2006-03-18 13:25:24 -05001440 struct cfq_data *cfqd = cic->key;
1441 struct cfq_queue *cfqq;
Jens Axboec1b707d2006-10-30 19:54:23 +01001442 unsigned long flags;
Jens Axboe35e60772006-06-14 09:10:45 +02001443
Jens Axboecaaa5f92006-06-16 11:23:00 +02001444 if (unlikely(!cfqd))
1445 return;
1446
Jens Axboec1b707d2006-10-30 19:54:23 +01001447 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
Jens Axboecaaa5f92006-06-16 11:23:00 +02001448
Jens Axboeff6657c2009-04-08 10:58:57 +02001449 cfqq = cic->cfqq[BLK_RW_ASYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02001450 if (cfqq) {
1451 struct cfq_queue *new_cfqq;
Jens Axboeff6657c2009-04-08 10:58:57 +02001452 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
1453 GFP_ATOMIC);
Jens Axboecaaa5f92006-06-16 11:23:00 +02001454 if (new_cfqq) {
Jens Axboeff6657c2009-04-08 10:58:57 +02001455 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
Jens Axboecaaa5f92006-06-16 11:23:00 +02001456 cfq_put_queue(cfqq);
1457 }
Jens Axboe22e2c502005-06-27 10:55:12 +02001458 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02001459
Jens Axboeff6657c2009-04-08 10:58:57 +02001460 cfqq = cic->cfqq[BLK_RW_SYNC];
Jens Axboecaaa5f92006-06-16 11:23:00 +02001461 if (cfqq)
1462 cfq_mark_cfqq_prio_changed(cfqq);
1463
Jens Axboec1b707d2006-10-30 19:54:23 +01001464 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
Jens Axboe22e2c502005-06-27 10:55:12 +02001465}
1466
Jens Axboefc463792006-08-29 09:05:44 +02001467static void cfq_ioc_set_ioprio(struct io_context *ioc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468{
Jens Axboe4ac845a2008-01-24 08:44:49 +01001469 call_for_each_cic(ioc, changed_ioprio);
Jens Axboefc463792006-08-29 09:05:44 +02001470 ioc->ioprio_changed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471}
1472
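/*
 * Find the cfqq that this io_context uses for sync or async IO on this
 * device, allocating a new one if none exists yet. A blocking allocation
 * (__GFP_WAIT) temporarily drops the queue lock and then retries the
 * lookup from scratch.
 */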
1473static struct cfq_queue *
Jens Axboe15c31be2007-07-10 13:43:25 +02001474cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
Jens Axboefd0928d2008-01-24 08:52:45 +01001475 struct io_context *ioc, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 struct cfq_queue *cfqq, *new_cfqq = NULL;
Vasily Tarasov91fac312007-04-25 12:29:51 +02001478 struct cfq_io_context *cic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
1480retry:
Jens Axboe4ac845a2008-01-24 08:44:49 +01001481 cic = cfq_cic_lookup(cfqd, ioc);
Vasily Tarasov91fac312007-04-25 12:29:51 +02001482 /* cic always exists here */
1483 cfqq = cic_to_cfqq(cic, is_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485 if (!cfqq) {
1486 if (new_cfqq) {
1487 cfqq = new_cfqq;
1488 new_cfqq = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02001489 } else if (gfp_mask & __GFP_WAIT) {
Jens Axboe89850f72006-07-22 16:48:31 +02001490 /*
1491 * Inform the allocator of the fact that we will
1492 * just repeat this allocation if it fails, to allow
1493 * the allocator to do whatever it needs to attempt to
1494 * free memory.
1495 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 spin_unlock_irq(cfqd->queue->queue_lock);
Christoph Lameter94f60302007-07-17 04:03:29 -07001497 new_cfqq = kmem_cache_alloc_node(cfq_pool,
1498 gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
1499 cfqd->queue->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 spin_lock_irq(cfqd->queue->queue_lock);
1501 goto retry;
Jens Axboe22e2c502005-06-27 10:55:12 +02001502 } else {
Christoph Lameter94f60302007-07-17 04:03:29 -07001503 cfqq = kmem_cache_alloc_node(cfq_pool,
1504 gfp_mask | __GFP_ZERO,
1505 cfqd->queue->node);
Jens Axboe22e2c502005-06-27 10:55:12 +02001506 if (!cfqq)
1507 goto out;
Kiyoshi Ueda db3b5842005-06-17 16:15:10 +02001508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
Jens Axboed9e76202007-04-20 14:27:50 +02001510 RB_CLEAR_NODE(&cfqq->rb_node);
Jens Axboe22e2c502005-06-27 10:55:12 +02001511 INIT_LIST_HEAD(&cfqq->fifo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 atomic_set(&cfqq->ref, 0);
1514 cfqq->cfqd = cfqd;
Jens Axboec5b680f2007-01-19 11:56:49 +11001515
Jens Axboe3b181522005-06-27 10:56:24 +02001516 cfq_mark_cfqq_prio_changed(cfqq);
Vasily Tarasov91fac312007-04-25 12:29:51 +02001517
Jens Axboefd0928d2008-01-24 08:52:45 +01001518 cfq_init_prio_data(cfqq, ioc);
Jens Axboe08717142008-01-28 11:38:15 +01001519
1520 if (is_sync) {
1521 if (!cfq_class_idle(cfqq))
1522 cfq_mark_cfqq_idle_window(cfqq);
1523 cfq_mark_cfqq_sync(cfqq);
1524 }
Jens Axboe7b679132008-05-30 12:23:07 +02001525 cfqq->pid = current->pid;
1526 cfq_log_cfqq(cfqd, cfqq, "alloced");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 }
1528
1529 if (new_cfqq)
1530 kmem_cache_free(cfq_pool, new_cfqq);
1531
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532out:
1533 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1534 return cfqq;
1535}
1536
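/*
 * Return the per-cfqd slot caching the shared async queue for the given
 * ioprio class and priority level.
 */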
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02001537static struct cfq_queue **
1538cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
1539{
Jens Axboefe094d92008-01-31 13:08:54 +01001540 switch (ioprio_class) {
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02001541 case IOPRIO_CLASS_RT:
1542 return &cfqd->async_cfqq[0][ioprio];
1543 case IOPRIO_CLASS_BE:
1544 return &cfqd->async_cfqq[1][ioprio];
1545 case IOPRIO_CLASS_IDLE:
1546 return &cfqd->async_idle_cfqq;
1547 default:
1548 BUG();
1549 }
1550}
1551
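/*
 * Get a cfqq for this (ioc, sync) pair. Async queues are shared per
 * priority level and cached in cfqd; sync queues are per-process. A
 * reference is taken on behalf of the caller.
 */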
Jens Axboe15c31be2007-07-10 13:43:25 +02001552static struct cfq_queue *
Jens Axboefd0928d2008-01-24 08:52:45 +01001553cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
Jens Axboe15c31be2007-07-10 13:43:25 +02001554 gfp_t gfp_mask)
1555{
Jens Axboefd0928d2008-01-24 08:52:45 +01001556 const int ioprio = task_ioprio(ioc);
1557 const int ioprio_class = task_ioprio_class(ioc);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02001558 struct cfq_queue **async_cfqq = NULL;
Jens Axboe15c31be2007-07-10 13:43:25 +02001559 struct cfq_queue *cfqq = NULL;
1560
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02001561 if (!is_sync) {
1562 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
1563 cfqq = *async_cfqq;
1564 }
1565
Oleg Nesterov0a0836a2007-10-23 15:08:21 +02001566 if (!cfqq) {
Jens Axboefd0928d2008-01-24 08:52:45 +01001567 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
Oleg Nesterov0a0836a2007-10-23 15:08:21 +02001568 if (!cfqq)
1569 return NULL;
1570 }
Jens Axboe15c31be2007-07-10 13:43:25 +02001571
1572 /*
1573 * pin the queue now that it's allocated, scheduler exit will prune it
1574 */
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02001575 if (!is_sync && !(*async_cfqq)) {
Jens Axboe15c31be2007-07-10 13:43:25 +02001576 atomic_inc(&cfqq->ref);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02001577 *async_cfqq = cfqq;
Jens Axboe15c31be2007-07-10 13:43:25 +02001578 }
1579
1580 atomic_inc(&cfqq->ref);
1581 return cfqq;
1582}
1583
Jens Axboe498d3aa22007-04-26 12:54:48 +02001584/*
1585 * We drop cfq io contexts lazily, so we may find a dead one.
1586 */
OGAWA Hirofumidbecf3a2006-04-18 09:45:18 +02001587static void
Jens Axboe4ac845a2008-01-24 08:44:49 +01001588cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
1589 struct cfq_io_context *cic)
OGAWA Hirofumidbecf3a2006-04-18 09:45:18 +02001590{
Jens Axboe4ac845a2008-01-24 08:44:49 +01001591 unsigned long flags;
1592
Jens Axboefc463792006-08-29 09:05:44 +02001593 WARN_ON(!list_empty(&cic->queue_list));
Jens Axboe597bc482007-04-24 21:23:53 +02001594
Jens Axboe4ac845a2008-01-24 08:44:49 +01001595 spin_lock_irqsave(&ioc->lock, flags);
Jens Axboe597bc482007-04-24 21:23:53 +02001596
Fabio Checconi4faa3c82008-04-10 08:28:01 +02001597 BUG_ON(ioc->ioc_data == cic);
Jens Axboe4ac845a2008-01-24 08:44:49 +01001598
1599 radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
Jens Axboeffc4e752008-02-19 10:02:29 +01001600 hlist_del_rcu(&cic->cic_list);
Jens Axboe4ac845a2008-01-24 08:44:49 +01001601 spin_unlock_irqrestore(&ioc->lock, flags);
1602
1603 cfq_cic_free(cic);
OGAWA Hirofumidbecf3a2006-04-18 09:45:18 +02001604}
1605
Jens Axboee2d74ac2006-03-28 08:59:01 +02001606static struct cfq_io_context *
Jens Axboe4ac845a2008-01-24 08:44:49 +01001607cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
Jens Axboee2d74ac2006-03-28 08:59:01 +02001608{
Jens Axboee2d74ac2006-03-28 08:59:01 +02001609 struct cfq_io_context *cic;
Jens Axboed6de8be2008-05-28 14:46:59 +02001610 unsigned long flags;
Jens Axboe4ac845a2008-01-24 08:44:49 +01001611 void *k;
Jens Axboee2d74ac2006-03-28 08:59:01 +02001612
Vasily Tarasov91fac312007-04-25 12:29:51 +02001613 if (unlikely(!ioc))
1614 return NULL;
1615
Jens Axboed6de8be2008-05-28 14:46:59 +02001616 rcu_read_lock();
1617
Jens Axboe597bc482007-04-24 21:23:53 +02001618 /*
1619 * we maintain a last-hit cache, to avoid browsing over the tree
1620 */
Jens Axboe4ac845a2008-01-24 08:44:49 +01001621 cic = rcu_dereference(ioc->ioc_data);
Jens Axboed6de8be2008-05-28 14:46:59 +02001622 if (cic && cic->key == cfqd) {
1623 rcu_read_unlock();
Jens Axboe597bc482007-04-24 21:23:53 +02001624 return cic;
Jens Axboed6de8be2008-05-28 14:46:59 +02001625 }
Jens Axboe597bc482007-04-24 21:23:53 +02001626
Jens Axboe4ac845a2008-01-24 08:44:49 +01001627 do {
Jens Axboe4ac845a2008-01-24 08:44:49 +01001628 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
1629 rcu_read_unlock();
1630 if (!cic)
1631 break;
OGAWA Hirofumibe3b0752006-04-18 19:18:31 +02001632 /* ->key must be copied to avoid race with cfq_exit_queue() */
1633 k = cic->key;
1634 if (unlikely(!k)) {
Jens Axboe4ac845a2008-01-24 08:44:49 +01001635 cfq_drop_dead_cic(cfqd, ioc, cic);
Jens Axboed6de8be2008-05-28 14:46:59 +02001636 rcu_read_lock();
Jens Axboe4ac845a2008-01-24 08:44:49 +01001637 continue;
OGAWA Hirofumidbecf3a2006-04-18 09:45:18 +02001638 }
Jens Axboee2d74ac2006-03-28 08:59:01 +02001639
Jens Axboed6de8be2008-05-28 14:46:59 +02001640 spin_lock_irqsave(&ioc->lock, flags);
Jens Axboe4ac845a2008-01-24 08:44:49 +01001641 rcu_assign_pointer(ioc->ioc_data, cic);
Jens Axboed6de8be2008-05-28 14:46:59 +02001642 spin_unlock_irqrestore(&ioc->lock, flags);
Jens Axboe4ac845a2008-01-24 08:44:49 +01001643 break;
1644 } while (1);
Jens Axboee2d74ac2006-03-28 08:59:01 +02001645
Jens Axboe4ac845a2008-01-24 08:44:49 +01001646 return cic;
Jens Axboee2d74ac2006-03-28 08:59:01 +02001647}
1648
Jens Axboe4ac845a2008-01-24 08:44:49 +01001649/*
 1650 * Add cic into ioc, using cfqd as the search key. This enables us to look up
 1651 * the process-specific cfq io context when entered from the block layer.
1652 * Also adds the cic to a per-cfqd list, used when this queue is removed.
1653 */
Jens Axboefebffd62008-01-28 13:19:43 +01001654static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1655 struct cfq_io_context *cic, gfp_t gfp_mask)
Jens Axboee2d74ac2006-03-28 08:59:01 +02001656{
Jens Axboe0261d682006-10-30 19:07:48 +01001657 unsigned long flags;
Jens Axboe4ac845a2008-01-24 08:44:49 +01001658 int ret;
Jens Axboee2d74ac2006-03-28 08:59:01 +02001659
Jens Axboe4ac845a2008-01-24 08:44:49 +01001660 ret = radix_tree_preload(gfp_mask);
1661 if (!ret) {
1662 cic->ioc = ioc;
1663 cic->key = cfqd;
Jens Axboee2d74ac2006-03-28 08:59:01 +02001664
Jens Axboe4ac845a2008-01-24 08:44:49 +01001665 spin_lock_irqsave(&ioc->lock, flags);
1666 ret = radix_tree_insert(&ioc->radix_root,
1667 (unsigned long) cfqd, cic);
Jens Axboeffc4e752008-02-19 10:02:29 +01001668 if (!ret)
1669 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
Jens Axboe4ac845a2008-01-24 08:44:49 +01001670 spin_unlock_irqrestore(&ioc->lock, flags);
1671
1672 radix_tree_preload_end();
1673
1674 if (!ret) {
1675 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1676 list_add(&cic->queue_list, &cfqd->cic_list);
1677 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
OGAWA Hirofumidbecf3a2006-04-18 09:45:18 +02001678 }
Jens Axboee2d74ac2006-03-28 08:59:01 +02001679 }
1680
Jens Axboe4ac845a2008-01-24 08:44:49 +01001681 if (ret)
1682 printk(KERN_ERR "cfq: cic link failed!\n");
Jens Axboefc463792006-08-29 09:05:44 +02001683
Jens Axboe4ac845a2008-01-24 08:44:49 +01001684 return ret;
Jens Axboee2d74ac2006-03-28 08:59:01 +02001685}
1686
Jens Axboe22e2c502005-06-27 10:55:12 +02001687/*
1688 * Setup general io context and cfq io context. There can be several cfq
1689 * io contexts per general io context, if this process is doing io to more
Jens Axboee2d74ac2006-03-28 08:59:01 +02001690 * than one device managed by cfq.
Jens Axboe22e2c502005-06-27 10:55:12 +02001691 */
1692static struct cfq_io_context *
Jens Axboee2d74ac2006-03-28 08:59:01 +02001693cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694{
Jens Axboe22e2c502005-06-27 10:55:12 +02001695 struct io_context *ioc = NULL;
1696 struct cfq_io_context *cic;
1697
1698 might_sleep_if(gfp_mask & __GFP_WAIT);
1699
Jens Axboeb5deef92006-07-19 23:39:40 +02001700 ioc = get_io_context(gfp_mask, cfqd->queue->node);
Jens Axboe22e2c502005-06-27 10:55:12 +02001701 if (!ioc)
1702 return NULL;
1703
Jens Axboe4ac845a2008-01-24 08:44:49 +01001704 cic = cfq_cic_lookup(cfqd, ioc);
Jens Axboee2d74ac2006-03-28 08:59:01 +02001705 if (cic)
1706 goto out;
Jens Axboe22e2c502005-06-27 10:55:12 +02001707
Jens Axboee2d74ac2006-03-28 08:59:01 +02001708 cic = cfq_alloc_io_context(cfqd, gfp_mask);
1709 if (cic == NULL)
1710 goto err;
Jens Axboe22e2c502005-06-27 10:55:12 +02001711
Jens Axboe4ac845a2008-01-24 08:44:49 +01001712 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
1713 goto err_free;
1714
Jens Axboe22e2c502005-06-27 10:55:12 +02001715out:
Jens Axboefc463792006-08-29 09:05:44 +02001716 smp_read_barrier_depends();
1717 if (unlikely(ioc->ioprio_changed))
1718 cfq_ioc_set_ioprio(ioc);
1719
Jens Axboe22e2c502005-06-27 10:55:12 +02001720 return cic;
Jens Axboe4ac845a2008-01-24 08:44:49 +01001721err_free:
1722 cfq_cic_free(cic);
Jens Axboe22e2c502005-06-27 10:55:12 +02001723err:
1724 put_io_context(ioc);
1725 return NULL;
1726}
1727
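/*
 * Update the decaying think time average for this cic: the time between
 * its last request completing and a new one being issued, capped at
 * twice the idle slice. Used to decide whether idling on the queue pays off.
 */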
1728static void
1729cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1730{
Jens Axboeaaf12282007-01-19 11:30:16 +11001731 unsigned long elapsed = jiffies - cic->last_end_request;
1732 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
Jens Axboe22e2c502005-06-27 10:55:12 +02001733
1734 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
1735 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
1736 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
1737}
1738
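/*
 * Maintain a decaying average of the seek distance between consecutive
 * requests from this cic; a large mean marks the process as seeky, which
 * in turn disables idling on hardware that can queue requests.
 */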
Jens Axboe206dc692006-03-28 13:03:44 +02001739static void
Jens Axboe6d048f52007-04-25 12:44:27 +02001740cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
1741 struct request *rq)
Jens Axboe206dc692006-03-28 13:03:44 +02001742{
1743 sector_t sdist;
1744 u64 total;
1745
Jens Axboe5e705372006-07-13 12:39:25 +02001746 if (cic->last_request_pos < rq->sector)
1747 sdist = rq->sector - cic->last_request_pos;
Jens Axboe206dc692006-03-28 13:03:44 +02001748 else
Jens Axboe5e705372006-07-13 12:39:25 +02001749 sdist = cic->last_request_pos - rq->sector;
Jens Axboe206dc692006-03-28 13:03:44 +02001750
1751 /*
1752 * Don't allow the seek distance to get too large from the
1753 * odd fragment, pagein, etc
1754 */
1755 if (cic->seek_samples <= 60) /* second&third seek */
1756 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
1757 else
1758 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
1759
1760 cic->seek_samples = (7*cic->seek_samples + 256) / 8;
1761 cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
1762 total = cic->seek_total + (cic->seek_samples/2);
1763 do_div(total, cic->seek_samples);
1764 cic->seek_mean = (sector_t)total;
1765}
Jens Axboe22e2c502005-06-27 10:55:12 +02001766
1767/*
1768 * Disable idle window if the process thinks too long or seeks so much that
1769 * it doesn't matter
1770 */
1771static void
1772cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1773 struct cfq_io_context *cic)
1774{
Jens Axboe7b679132008-05-30 12:23:07 +02001775 int old_idle, enable_idle;
Jens Axboe1be92f22007-04-19 14:32:26 +02001776
Jens Axboe08717142008-01-28 11:38:15 +01001777 /*
1778 * Don't idle for async or idle io prio class
1779 */
1780 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
Jens Axboe1be92f22007-04-19 14:32:26 +02001781 return;
1782
Jens Axboec265a7f2008-06-26 13:49:33 +02001783 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02001784
Nikanth Karthikesan66dac982007-11-27 12:47:04 +01001785 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
Jens Axboecaaa5f92006-06-16 11:23:00 +02001786 (cfqd->hw_tag && CIC_SEEKY(cic)))
Jens Axboe22e2c502005-06-27 10:55:12 +02001787 enable_idle = 0;
1788 else if (sample_valid(cic->ttime_samples)) {
1789 if (cic->ttime_mean > cfqd->cfq_slice_idle)
1790 enable_idle = 0;
1791 else
1792 enable_idle = 1;
1793 }
1794
Jens Axboe7b679132008-05-30 12:23:07 +02001795 if (old_idle != enable_idle) {
1796 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
1797 if (enable_idle)
1798 cfq_mark_cfqq_idle_window(cfqq);
1799 else
1800 cfq_clear_cfqq_idle_window(cfqq);
1801 }
Jens Axboe22e2c502005-06-27 10:55:12 +02001802}
1803
Jens Axboe22e2c502005-06-27 10:55:12 +02001804/*
1805 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 1806 * no (or if we aren't sure); returning 1 will cause a preempt.
1807 */
1808static int
1809cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
Jens Axboe5e705372006-07-13 12:39:25 +02001810 struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02001811{
Jens Axboe6d048f52007-04-25 12:44:27 +02001812 struct cfq_queue *cfqq;
Jens Axboe22e2c502005-06-27 10:55:12 +02001813
Jens Axboe6d048f52007-04-25 12:44:27 +02001814 cfqq = cfqd->active_queue;
1815 if (!cfqq)
Jens Axboe22e2c502005-06-27 10:55:12 +02001816 return 0;
1817
Jens Axboe6d048f52007-04-25 12:44:27 +02001818 if (cfq_slice_used(cfqq))
1819 return 1;
1820
1821 if (cfq_class_idle(new_cfqq))
Jens Axboecaaa5f92006-06-16 11:23:00 +02001822 return 0;
Jens Axboe22e2c502005-06-27 10:55:12 +02001823
1824 if (cfq_class_idle(cfqq))
1825 return 1;
Jens Axboe1e3335d2007-02-14 19:59:49 +01001826
Jens Axboe22e2c502005-06-27 10:55:12 +02001827 /*
Jens Axboe374f84a2006-07-23 01:42:19 +02001828 * if the new request is sync, but the currently running queue is
1829 * not, let the sync request have priority.
1830 */
Jens Axboe5e705372006-07-13 12:39:25 +02001831 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02001832 return 1;
Jens Axboe1e3335d2007-02-14 19:59:49 +01001833
Jens Axboe374f84a2006-07-23 01:42:19 +02001834 /*
1835 * So both queues are sync. Let the new request get disk time if
1836 * it's a metadata request and the current queue is doing regular IO.
1837 */
1838 if (rq_is_meta(rq) && !cfqq->meta_pending)
1839 return 1;
Jens Axboe22e2c502005-06-27 10:55:12 +02001840
Divyesh Shah3a9a3f62009-01-30 12:46:41 +01001841 /*
1842 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
1843 */
1844 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
1845 return 1;
1846
Jens Axboe1e3335d2007-02-14 19:59:49 +01001847 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
1848 return 0;
1849
1850 /*
 1851	 * if this request is as good as one we would expect from the
1852 * current cfqq, let it preempt
1853 */
Jens Axboe6d048f52007-04-25 12:44:27 +02001854 if (cfq_rq_close(cfqd, rq))
Jens Axboe1e3335d2007-02-14 19:59:49 +01001855 return 1;
1856
Jens Axboe22e2c502005-06-27 10:55:12 +02001857 return 0;
1858}
1859
1860/*
1861 * cfqq preempts the active queue. if we allowed preempt with no slice left,
1862 * let it have half of its nominal slice.
1863 */
1864static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1865{
Jens Axboe7b679132008-05-30 12:23:07 +02001866 cfq_log_cfqq(cfqd, cfqq, "preempt");
Jens Axboe6084cdd2007-04-23 08:25:00 +02001867 cfq_slice_expired(cfqd, 1);
Jens Axboe22e2c502005-06-27 10:55:12 +02001868
Jens Axboebf572252006-07-19 20:29:12 +02001869 /*
 1870	 * Put the new queue at the front of the current list,
1871 * so we know that it will be selected next.
1872 */
1873 BUG_ON(!cfq_cfqq_on_rr(cfqq));
Jens Axboeedd75ff2007-04-19 12:03:34 +02001874
1875 cfq_service_tree_add(cfqd, cfqq, 1);
Jens Axboebf572252006-07-19 20:29:12 +02001876
Jens Axboe44f7c162007-01-19 11:51:58 +11001877 cfqq->slice_end = 0;
1878 cfq_mark_cfqq_slice_new(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02001879}
1880
1881/*
Jens Axboe5e705372006-07-13 12:39:25 +02001882 * Called when a new fs request (rq) is added (to cfqq). Check if there's
Jens Axboe22e2c502005-06-27 10:55:12 +02001883 * something we should do about it
1884 */
1885static void
Jens Axboe5e705372006-07-13 12:39:25 +02001886cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1887 struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02001888{
Jens Axboe5e705372006-07-13 12:39:25 +02001889 struct cfq_io_context *cic = RQ_CIC(rq);
Jens Axboe12e9fdd2006-06-01 10:09:56 +02001890
Aaron Carroll45333d52008-08-26 15:52:36 +02001891 cfqd->rq_queued++;
Jens Axboe374f84a2006-07-23 01:42:19 +02001892 if (rq_is_meta(rq))
1893 cfqq->meta_pending++;
1894
Jens Axboe9c2c38a2005-08-24 14:57:54 +02001895 cfq_update_io_thinktime(cfqd, cic);
Jens Axboe6d048f52007-04-25 12:44:27 +02001896 cfq_update_io_seektime(cfqd, cic, rq);
Jens Axboe9c2c38a2005-08-24 14:57:54 +02001897 cfq_update_idle_window(cfqd, cfqq, cic);
1898
Jens Axboe5e705372006-07-13 12:39:25 +02001899 cic->last_request_pos = rq->sector + rq->nr_sectors;
Jens Axboe22e2c502005-06-27 10:55:12 +02001900
1901 if (cfqq == cfqd->active_queue) {
1902 /*
Jens Axboeb0291952009-04-07 11:38:31 +02001903 * Remember that we saw a request from this process, but
1904 * don't start queuing just yet. Otherwise we risk seeing lots
1905 * of tiny requests, because we disrupt the normal plugging
Jens Axboed6ceb252009-04-14 14:18:16 +02001906 * and merging. If the request is already larger than a single
1907 * page, let it rip immediately. For that case we assume that
1908 * merging is already done.
Jens Axboe22e2c502005-06-27 10:55:12 +02001909 */
Jens Axboed6ceb252009-04-14 14:18:16 +02001910 if (cfq_cfqq_wait_request(cfqq)) {
1911 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE) {
1912 del_timer(&cfqd->idle_slice_timer);
1913 blk_start_queueing(cfqd->queue);
1914 }
Jens Axboeb0291952009-04-07 11:38:31 +02001915 cfq_mark_cfqq_must_dispatch(cfqq);
Jens Axboed6ceb252009-04-14 14:18:16 +02001916 }
Jens Axboe5e705372006-07-13 12:39:25 +02001917 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
Jens Axboe22e2c502005-06-27 10:55:12 +02001918 /*
1919 * not the active queue - expire current slice if it is
 1920		 * idle and has expired its mean thinktime or this new queue
Divyesh Shah3a9a3f62009-01-30 12:46:41 +01001921 * has some old slice time left and is of higher priority or
1922 * this new queue is RT and the current one is BE
Jens Axboe22e2c502005-06-27 10:55:12 +02001923 */
1924 cfq_preempt_queue(cfqd, cfqq);
Jens Axboedc72ef42006-07-20 14:54:05 +02001925 blk_start_queueing(cfqd->queue);
Jens Axboe22e2c502005-06-27 10:55:12 +02001926 }
1927}
1928
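/*
 * A new request enters the elevator: refresh the queue's prio data, sort
 * the request into the rb-tree, append it to the FIFO and run the
 * enqueue checks (preemption, idle window, think/seek time updates).
 */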
Jens Axboe165125e2007-07-24 09:28:11 +02001929static void cfq_insert_request(struct request_queue *q, struct request *rq)
Jens Axboe22e2c502005-06-27 10:55:12 +02001930{
Jens Axboeb4878f22005-10-20 16:42:29 +02001931 struct cfq_data *cfqd = q->elevator->elevator_data;
Jens Axboe5e705372006-07-13 12:39:25 +02001932 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02001933
Jens Axboe7b679132008-05-30 12:23:07 +02001934 cfq_log_cfqq(cfqd, cfqq, "insert_request");
Jens Axboefd0928d2008-01-24 08:52:45 +01001935 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Jens Axboe5e705372006-07-13 12:39:25 +02001937 cfq_add_rq_rb(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
Jens Axboe22e2c502005-06-27 10:55:12 +02001939 list_add_tail(&rq->queuelist, &cfqq->fifo);
1940
Jens Axboe5e705372006-07-13 12:39:25 +02001941 cfq_rq_enqueued(cfqd, cfqq, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942}
1943
Aaron Carroll45333d52008-08-26 15:52:36 +02001944/*
1945 * Update hw_tag based on peak queue depth over 50 samples under
1946 * sufficient load.
1947 */
1948static void cfq_update_hw_tag(struct cfq_data *cfqd)
1949{
1950 if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
1951 cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
1952
1953 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
1954 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
1955 return;
1956
1957 if (cfqd->hw_tag_samples++ < 50)
1958 return;
1959
1960 if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
1961 cfqd->hw_tag = 1;
1962 else
1963 cfqd->hw_tag = 0;
1964
1965 cfqd->hw_tag_samples = 0;
1966 cfqd->rq_in_driver_peak = 0;
1967}
1968
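/*
 * A request completed at the driver level: drop the in-flight counts,
 * record the completion time for sync IO, and either expire the active
 * queue (slice used up) or arm the idle timer while waiting for more
 * sync requests from it.
 */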
Jens Axboe165125e2007-07-24 09:28:11 +02001969static void cfq_completed_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970{
Jens Axboe5e705372006-07-13 12:39:25 +02001971 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02001972 struct cfq_data *cfqd = cfqq->cfqd;
Jens Axboe5380a102006-07-13 12:37:56 +02001973 const int sync = rq_is_sync(rq);
Jens Axboeb4878f22005-10-20 16:42:29 +02001974 unsigned long now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
Jens Axboeb4878f22005-10-20 16:42:29 +02001976 now = jiffies;
Jens Axboe7b679132008-05-30 12:23:07 +02001977 cfq_log_cfqq(cfqd, cfqq, "complete");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
Aaron Carroll45333d52008-08-26 15:52:36 +02001979 cfq_update_hw_tag(cfqd);
1980
Jens Axboeb4878f22005-10-20 16:42:29 +02001981 WARN_ON(!cfqd->rq_in_driver);
Jens Axboe6d048f52007-04-25 12:44:27 +02001982 WARN_ON(!cfqq->dispatched);
Jens Axboeb4878f22005-10-20 16:42:29 +02001983 cfqd->rq_in_driver--;
Jens Axboe6d048f52007-04-25 12:44:27 +02001984 cfqq->dispatched--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
Jens Axboe3ed9a292007-04-23 08:33:33 +02001986 if (cfq_cfqq_sync(cfqq))
1987 cfqd->sync_flight--;
1988
Jens Axboeb4878f22005-10-20 16:42:29 +02001989 if (!cfq_class_idle(cfqq))
1990 cfqd->last_end_request = now;
Jens Axboe3b181522005-06-27 10:56:24 +02001991
Jens Axboecaaa5f92006-06-16 11:23:00 +02001992 if (sync)
Jens Axboe5e705372006-07-13 12:39:25 +02001993 RQ_CIC(rq)->last_end_request = now;
Jens Axboecaaa5f92006-06-16 11:23:00 +02001994
1995 /*
1996 * If this is the active queue, check if it needs to be expired,
1997 * or if we want to idle in case it has no pending requests.
1998 */
1999 if (cfqd->active_queue == cfqq) {
Jens Axboe44f7c162007-01-19 11:51:58 +11002000 if (cfq_cfqq_slice_new(cfqq)) {
2001 cfq_set_prio_slice(cfqd, cfqq);
2002 cfq_clear_cfqq_slice_new(cfqq);
2003 }
Jens Axboe08717142008-01-28 11:38:15 +01002004 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
Jens Axboe6084cdd2007-04-23 08:25:00 +02002005 cfq_slice_expired(cfqd, 1);
Jens Axboeaeb6faf2009-04-06 14:48:07 +02002006 else if (sync && !rq_noidle(rq) &&
2007 RB_EMPTY_ROOT(&cfqq->sort_list)) {
Jens Axboe6d048f52007-04-25 12:44:27 +02002008 cfq_arm_slice_timer(cfqd);
Jens Axboeaeb6faf2009-04-06 14:48:07 +02002009 }
Jens Axboecaaa5f92006-06-16 11:23:00 +02002010 }
Jens Axboe6d048f52007-04-25 12:44:27 +02002011
2012 if (!cfqd->rq_in_driver)
2013 cfq_schedule_dispatch(cfqd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014}
2015
Jens Axboe22e2c502005-06-27 10:55:12 +02002016/*
2017 * we temporarily boost lower priority queues if they are holding fs exclusive
2018 * resources. they are boosted to normal prio (CLASS_BE/4)
2019 */
2020static void cfq_prio_boost(struct cfq_queue *cfqq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021{
Jens Axboe22e2c502005-06-27 10:55:12 +02002022 if (has_fs_excl()) {
2023 /*
2024 * boost idle prio on transactions that would lock out other
2025 * users of the filesystem
2026 */
2027 if (cfq_class_idle(cfqq))
2028 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2029 if (cfqq->ioprio > IOPRIO_NORM)
2030 cfqq->ioprio = IOPRIO_NORM;
2031 } else {
2032 /*
2033 * check if we need to unboost the queue
2034 */
2035 if (cfqq->ioprio_class != cfqq->org_ioprio_class)
2036 cfqq->ioprio_class = cfqq->org_ioprio_class;
2037 if (cfqq->ioprio != cfqq->org_ioprio)
2038 cfqq->ioprio = cfqq->org_ioprio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002040}
2041
Jens Axboe89850f72006-07-22 16:48:31 +02002042static inline int __cfq_may_queue(struct cfq_queue *cfqq)
Jens Axboe22e2c502005-06-27 10:55:12 +02002043{
Jens Axboe3b181522005-06-27 10:56:24 +02002044 if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
Andrew Morton99f95e52005-06-27 20:14:05 -07002045 !cfq_cfqq_must_alloc_slice(cfqq)) {
Jens Axboe3b181522005-06-27 10:56:24 +02002046 cfq_mark_cfqq_must_alloc_slice(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002047 return ELV_MQUEUE_MUST;
Jens Axboe3b181522005-06-27 10:56:24 +02002048 }
Jens Axboe22e2c502005-06-27 10:55:12 +02002049
2050 return ELV_MQUEUE_MAY;
Jens Axboe22e2c502005-06-27 10:55:12 +02002051}
2052
Jens Axboe165125e2007-07-24 09:28:11 +02002053static int cfq_may_queue(struct request_queue *q, int rw)
Jens Axboe22e2c502005-06-27 10:55:12 +02002054{
2055 struct cfq_data *cfqd = q->elevator->elevator_data;
2056 struct task_struct *tsk = current;
Vasily Tarasov91fac312007-04-25 12:29:51 +02002057 struct cfq_io_context *cic;
Jens Axboe22e2c502005-06-27 10:55:12 +02002058 struct cfq_queue *cfqq;
2059
2060 /*
2061 * don't force setup of a queue from here, as a call to may_queue
2062 * does not necessarily imply that a request actually will be queued.
 2063	 * so just look up a possibly existing queue, or return 'may queue'
2064 * if that fails
2065 */
Jens Axboe4ac845a2008-01-24 08:44:49 +01002066 cic = cfq_cic_lookup(cfqd, tsk->io_context);
Vasily Tarasov91fac312007-04-25 12:29:51 +02002067 if (!cic)
2068 return ELV_MQUEUE_MAY;
2069
Jens Axboeb0b78f82009-04-08 10:56:08 +02002070 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
Jens Axboe22e2c502005-06-27 10:55:12 +02002071 if (cfqq) {
Jens Axboefd0928d2008-01-24 08:52:45 +01002072 cfq_init_prio_data(cfqq, cic->ioc);
Jens Axboe22e2c502005-06-27 10:55:12 +02002073 cfq_prio_boost(cfqq);
2074
Jens Axboe89850f72006-07-22 16:48:31 +02002075 return __cfq_may_queue(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002076 }
2077
2078 return ELV_MQUEUE_MAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079}
2080
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081/*
2082 * queue lock held here
2083 */
Jens Axboebb37b942006-12-01 10:42:33 +01002084static void cfq_put_request(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085{
Jens Axboe5e705372006-07-13 12:39:25 +02002086 struct cfq_queue *cfqq = RQ_CFQQ(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
Jens Axboe5e705372006-07-13 12:39:25 +02002088 if (cfqq) {
Jens Axboe22e2c502005-06-27 10:55:12 +02002089 const int rw = rq_data_dir(rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
Jens Axboe22e2c502005-06-27 10:55:12 +02002091 BUG_ON(!cfqq->allocated[rw]);
2092 cfqq->allocated[rw]--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
Jens Axboe5e705372006-07-13 12:39:25 +02002094 put_io_context(RQ_CIC(rq)->ioc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 rq->elevator_private = NULL;
Jens Axboe5e705372006-07-13 12:39:25 +02002097 rq->elevator_private2 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 cfq_put_queue(cfqq);
2100 }
2101}
2102
2103/*
Jens Axboe22e2c502005-06-27 10:55:12 +02002104 * Allocate cfq data structures associated with this request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 */
Jens Axboe22e2c502005-06-27 10:55:12 +02002106static int
Jens Axboe165125e2007-07-24 09:28:11 +02002107cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
2109 struct cfq_data *cfqd = q->elevator->elevator_data;
2110 struct cfq_io_context *cic;
2111 const int rw = rq_data_dir(rq);
Jens Axboe7749a8d2006-12-13 13:02:26 +01002112 const int is_sync = rq_is_sync(rq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002113 struct cfq_queue *cfqq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 unsigned long flags;
2115
2116 might_sleep_if(gfp_mask & __GFP_WAIT);
2117
Jens Axboee2d74ac2006-03-28 08:59:01 +02002118 cic = cfq_get_io_context(cfqd, gfp_mask);
Jens Axboe22e2c502005-06-27 10:55:12 +02002119
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 spin_lock_irqsave(q->queue_lock, flags);
2121
Jens Axboe22e2c502005-06-27 10:55:12 +02002122 if (!cic)
2123 goto queue_fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
Vasily Tarasov91fac312007-04-25 12:29:51 +02002125 cfqq = cic_to_cfqq(cic, is_sync);
2126 if (!cfqq) {
Jens Axboefd0928d2008-01-24 08:52:45 +01002127 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
Vasily Tarasov91fac312007-04-25 12:29:51 +02002128
Jens Axboe22e2c502005-06-27 10:55:12 +02002129 if (!cfqq)
2130 goto queue_fail;
2131
Vasily Tarasov91fac312007-04-25 12:29:51 +02002132 cic_set_cfqq(cic, cfqq, is_sync);
2133 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134
2135 cfqq->allocated[rw]++;
Jens Axboe3b181522005-06-27 10:56:24 +02002136 cfq_clear_cfqq_must_alloc(cfqq);
Jens Axboe22e2c502005-06-27 10:55:12 +02002137 atomic_inc(&cfqq->ref);
Jens Axboe5e705372006-07-13 12:39:25 +02002138
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 spin_unlock_irqrestore(q->queue_lock, flags);
2140
Jens Axboe5e705372006-07-13 12:39:25 +02002141 rq->elevator_private = cic;
2142 rq->elevator_private2 = cfqq;
2143 return 0;
Jens Axboe3b181522005-06-27 10:56:24 +02002144
Jens Axboe22e2c502005-06-27 10:55:12 +02002145queue_fail:
2146 if (cic)
2147 put_io_context(cic->ioc);
Jens Axboe89850f72006-07-22 16:48:31 +02002148
Jens Axboe3b181522005-06-27 10:56:24 +02002149 cfq_schedule_dispatch(cfqd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 spin_unlock_irqrestore(q->queue_lock, flags);
Jens Axboe7b679132008-05-30 12:23:07 +02002151 cfq_log(cfqd, "set_request fail");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 return 1;
2153}
2154
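/*
 * Work handler behind cfqd->unplug_work: (re)start the request queue
 * from process context.
 */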
David Howells65f27f32006-11-22 14:55:48 +00002155static void cfq_kick_queue(struct work_struct *work)
Jens Axboe22e2c502005-06-27 10:55:12 +02002156{
David Howells65f27f32006-11-22 14:55:48 +00002157 struct cfq_data *cfqd =
2158 container_of(work, struct cfq_data, unplug_work);
Jens Axboe165125e2007-07-24 09:28:11 +02002159 struct request_queue *q = cfqd->queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02002160 unsigned long flags;
2161
2162 spin_lock_irqsave(q->queue_lock, flags);
Jens Axboedc72ef42006-07-20 14:54:05 +02002163 blk_start_queueing(q);
Jens Axboe22e2c502005-06-27 10:55:12 +02002164 spin_unlock_irqrestore(q->queue_lock, flags);
2165}
2166
2167/*
2168 * Timer running if the active_queue is currently idling inside its time slice
2169 */
2170static void cfq_idle_slice_timer(unsigned long data)
2171{
2172 struct cfq_data *cfqd = (struct cfq_data *) data;
2173 struct cfq_queue *cfqq;
2174 unsigned long flags;
Jens Axboe3c6bd2f2007-01-19 12:06:33 +11002175 int timed_out = 1;
Jens Axboe22e2c502005-06-27 10:55:12 +02002176
Jens Axboe7b679132008-05-30 12:23:07 +02002177 cfq_log(cfqd, "idle timer fired");
2178
Jens Axboe22e2c502005-06-27 10:55:12 +02002179 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2180
Jens Axboefe094d92008-01-31 13:08:54 +01002181 cfqq = cfqd->active_queue;
2182 if (cfqq) {
Jens Axboe3c6bd2f2007-01-19 12:06:33 +11002183 timed_out = 0;
2184
Jens Axboe22e2c502005-06-27 10:55:12 +02002185 /*
Jens Axboeb0291952009-04-07 11:38:31 +02002186 * We saw a request before the queue expired, let it through
2187 */
2188 if (cfq_cfqq_must_dispatch(cfqq))
2189 goto out_kick;
2190
2191 /*
Jens Axboe22e2c502005-06-27 10:55:12 +02002192 * expired
2193 */
Jens Axboe44f7c162007-01-19 11:51:58 +11002194 if (cfq_slice_used(cfqq))
Jens Axboe22e2c502005-06-27 10:55:12 +02002195 goto expire;
2196
2197 /*
 2198		 * only expire and reinvoke the request handler if there are
2199 * other queues with pending requests
2200 */
Jens Axboecaaa5f92006-06-16 11:23:00 +02002201 if (!cfqd->busy_queues)
Jens Axboe22e2c502005-06-27 10:55:12 +02002202 goto out_cont;
Jens Axboe22e2c502005-06-27 10:55:12 +02002203
2204 /*
2205 * not expired and it has a request pending, let it dispatch
2206 */
Jens Axboe75e50982009-04-07 08:56:14 +02002207 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
Jens Axboe22e2c502005-06-27 10:55:12 +02002208 goto out_kick;
Jens Axboe22e2c502005-06-27 10:55:12 +02002209 }
2210expire:
Jens Axboe6084cdd2007-04-23 08:25:00 +02002211 cfq_slice_expired(cfqd, timed_out);
Jens Axboe22e2c502005-06-27 10:55:12 +02002212out_kick:
Jens Axboe3b181522005-06-27 10:56:24 +02002213 cfq_schedule_dispatch(cfqd);
Jens Axboe22e2c502005-06-27 10:55:12 +02002214out_cont:
2215 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2216}
2217
Jens Axboe3b181522005-06-27 10:56:24 +02002218static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2219{
2220 del_timer_sync(&cfqd->idle_slice_timer);
Cheng Renquan64d01dc2008-12-03 12:41:39 +01002221 cancel_work_sync(&cfqd->unplug_work);
Jens Axboe3b181522005-06-27 10:56:24 +02002222}
Jens Axboe22e2c502005-06-27 10:55:12 +02002223
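/*
 * Drop the references that cfqd holds on the cached async queues (one
 * per prio level for RT and BE, plus the single idle queue).
 */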
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02002224static void cfq_put_async_queues(struct cfq_data *cfqd)
2225{
2226 int i;
2227
2228 for (i = 0; i < IOPRIO_BE_NR; i++) {
2229 if (cfqd->async_cfqq[0][i])
2230 cfq_put_queue(cfqd->async_cfqq[0][i]);
2231 if (cfqd->async_cfqq[1][i])
2232 cfq_put_queue(cfqd->async_cfqq[1][i]);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02002233 }
Oleg Nesterov2389d1e2007-11-05 08:58:05 +01002234
2235 if (cfqd->async_idle_cfqq)
2236 cfq_put_queue(cfqd->async_idle_cfqq);
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02002237}
2238
Jens Axboeb374d182008-10-31 10:05:07 +01002239static void cfq_exit_queue(struct elevator_queue *e)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240{
Jens Axboe22e2c502005-06-27 10:55:12 +02002241 struct cfq_data *cfqd = e->elevator_data;
Jens Axboe165125e2007-07-24 09:28:11 +02002242 struct request_queue *q = cfqd->queue;
Jens Axboe22e2c502005-06-27 10:55:12 +02002243
Jens Axboe3b181522005-06-27 10:56:24 +02002244 cfq_shutdown_timer_wq(cfqd);
Jens Axboee2d74ac2006-03-28 08:59:01 +02002245
Al Virod9ff4182006-03-18 13:51:22 -05002246 spin_lock_irq(q->queue_lock);
Jens Axboee2d74ac2006-03-28 08:59:01 +02002247
Al Virod9ff4182006-03-18 13:51:22 -05002248 if (cfqd->active_queue)
Jens Axboe6084cdd2007-04-23 08:25:00 +02002249 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
Jens Axboee2d74ac2006-03-28 08:59:01 +02002250
2251 while (!list_empty(&cfqd->cic_list)) {
Al Virod9ff4182006-03-18 13:51:22 -05002252 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2253 struct cfq_io_context,
2254 queue_list);
Jens Axboe89850f72006-07-22 16:48:31 +02002255
2256 __cfq_exit_single_io_context(cfqd, cic);
Al Virod9ff4182006-03-18 13:51:22 -05002257 }
Jens Axboee2d74ac2006-03-28 08:59:01 +02002258
Vasily Tarasovc2dea2d2007-07-20 10:06:38 +02002259 cfq_put_async_queues(cfqd);
Jens Axboe15c31be2007-07-10 13:43:25 +02002260
Al Virod9ff4182006-03-18 13:51:22 -05002261 spin_unlock_irq(q->queue_lock);
Al Viroa90d7422006-03-18 12:05:37 -05002262
2263 cfq_shutdown_timer_wq(cfqd);
2264
Al Viroa90d7422006-03-18 12:05:37 -05002265 kfree(cfqd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266}
2267
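/*
 * Elevator init: allocate the per-queue cfq_data on the right node, seed
 * the service tree and cic list, set up the idle slice timer and unplug
 * work, and load the default tunables.
 */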
Jens Axboe165125e2007-07-24 09:28:11 +02002268static void *cfq_init_queue(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269{
2270 struct cfq_data *cfqd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Christoph Lameter94f60302007-07-17 04:03:29 -07002272 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 if (!cfqd)
Jens Axboebc1c1162006-06-08 08:49:06 +02002274 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
Jens Axboecc09e292007-04-26 12:53:50 +02002276 cfqd->service_tree = CFQ_RB_ROOT;
Al Virod9ff4182006-03-18 13:51:22 -05002277 INIT_LIST_HEAD(&cfqd->cic_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 cfqd->queue = q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
Jens Axboe22e2c502005-06-27 10:55:12 +02002281 init_timer(&cfqd->idle_slice_timer);
2282 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2283 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2284
David Howells65f27f32006-11-22 14:55:48 +00002285 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
Jens Axboe22e2c502005-06-27 10:55:12 +02002286
Oleg Nesterovb70c8642007-11-07 09:46:13 +01002287 cfqd->last_end_request = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 cfqd->cfq_quantum = cfq_quantum;
Jens Axboe22e2c502005-06-27 10:55:12 +02002289 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2290 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 cfqd->cfq_back_max = cfq_back_max;
2292 cfqd->cfq_back_penalty = cfq_back_penalty;
Jens Axboe22e2c502005-06-27 10:55:12 +02002293 cfqd->cfq_slice[0] = cfq_slice_async;
2294 cfqd->cfq_slice[1] = cfq_slice_sync;
2295 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2296 cfqd->cfq_slice_idle = cfq_slice_idle;
Aaron Carroll45333d52008-08-26 15:52:36 +02002297 cfqd->hw_tag = 1;
Jens Axboe3b181522005-06-27 10:56:24 +02002298
Jens Axboebc1c1162006-06-08 08:49:06 +02002299 return cfqd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300}
2301
2302static void cfq_slab_kill(void)
2303{
Jens Axboed6de8be2008-05-28 14:46:59 +02002304 /*
2305 * Caller already ensured that pending RCU callbacks are completed,
2306 * so we should have no busy allocations at this point.
2307 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 if (cfq_pool)
2309 kmem_cache_destroy(cfq_pool);
2310 if (cfq_ioc_pool)
2311 kmem_cache_destroy(cfq_ioc_pool);
2312}
2313
2314static int __init cfq_slab_setup(void)
2315{
Christoph Lameter0a31bd52007-05-06 14:49:57 -07002316 cfq_pool = KMEM_CACHE(cfq_queue, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 if (!cfq_pool)
2318 goto fail;
2319
Fabio Checconi34e6bbf2008-04-02 14:31:02 +02002320 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 if (!cfq_ioc_pool)
2322 goto fail;
2323
2324 return 0;
2325fail:
2326 cfq_slab_kill();
2327 return -ENOMEM;
2328}
2329
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330/*
2331 * sysfs parts below -->
2332 */
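/*
 * cfq_var_show()/cfq_var_store() convert a single tunable to and from its
 * sysfs text form; the SHOW_FUNCTION/STORE_FUNCTION macros below stamp out
 * one such pair per tunable, with optional jiffies<->msecs conversion and
 * range clamping on the store side.
 */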
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333static ssize_t
2334cfq_var_show(unsigned int var, char *page)
2335{
2336 return sprintf(page, "%d\n", var);
2337}
2338
2339static ssize_t
2340cfq_var_store(unsigned int *var, const char *page, size_t count)
2341{
2342 char *p = (char *) page;
2343
2344 *var = simple_strtoul(p, &p, 10);
2345 return count;
2346}
2347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
Jens Axboeb374d182008-10-31 10:05:07 +01002349static ssize_t __FUNC(struct elevator_queue *e, char *page) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350{ \
Al Viro3d1ab402006-03-18 18:35:43 -05002351 struct cfq_data *cfqd = e->elevator_data; \
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 unsigned int __data = __VAR; \
2353 if (__CONV) \
2354 __data = jiffies_to_msecs(__data); \
2355 return cfq_var_show(__data, (page)); \
2356}
2357SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
Jens Axboe22e2c502005-06-27 10:55:12 +02002358SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
2359SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
Al Viroe572ec72006-03-18 22:27:18 -05002360SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
2361SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
Jens Axboe22e2c502005-06-27 10:55:12 +02002362SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2363SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2364SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2365SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366#undef SHOW_FUNCTION
2367
2368#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
Jens Axboeb374d182008-10-31 10:05:07 +01002369static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370{ \
Al Viro3d1ab402006-03-18 18:35:43 -05002371 struct cfq_data *cfqd = e->elevator_data; \
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 unsigned int __data; \
2373 int ret = cfq_var_store(&__data, (page), count); \
2374 if (__data < (MIN)) \
2375 __data = (MIN); \
2376 else if (__data > (MAX)) \
2377 __data = (MAX); \
2378 if (__CONV) \
2379 *(__PTR) = msecs_to_jiffies(__data); \
2380 else \
2381 *(__PTR) = __data; \
2382 return ret; \
2383}
2384STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
Jens Axboefe094d92008-01-31 13:08:54 +01002385STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
2386 UINT_MAX, 1);
2387STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
2388 UINT_MAX, 1);
Al Viroe572ec72006-03-18 22:27:18 -05002389STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
Jens Axboefe094d92008-01-31 13:08:54 +01002390STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
2391 UINT_MAX, 0);
Jens Axboe22e2c502005-06-27 10:55:12 +02002392STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
2393STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2394STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
Jens Axboefe094d92008-01-31 13:08:54 +01002395STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2396 UINT_MAX, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

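/*
 * Editor's note, not part of the original source: each cfq_attrs entry is
 * exposed by the elevator core as a sysfs file under the request queue's
 * "iosched" directory, typically /sys/block/<disk>/queue/iosched/<name>.
 * For attributes built with __CONV set above, values are reported in
 * milliseconds and converted back to jiffies on store. Illustrative shell
 * session (device name "sda" is a placeholder):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	8
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */
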
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_merge_fn = cfq_allow_merge,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_queue_empty_fn = cfq_queue_empty,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
		.trim = cfq_free_io_context,
	},
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};

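/*
 * Editor's note, not part of the original source: cfq_init() below hands
 * this elevator_type to the block layer with elv_register(). Once it is
 * registered, CFQ can be selected per device at runtime, e.g.
 * (illustrative, "sda" is a placeholder):
 *
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *
 * or made the boot-time default with the "elevator=cfq" kernel parameter.
 */
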
static int __init cfq_init(void)
{
	/*
	 * cfq_slice_async and cfq_slice_idle are fractions of HZ and can
	 * truncate to 0 on HZ < 1000 setups; enforce at least one jiffy
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");