/*
 * Deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
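
/*
 * Concretely: with the defaults above, a queued read becomes eligible for
 * expiry-driven dispatch 500 ms after it is queued (HZ / 2 jiffies is half
 * a second at any HZ), a write after 5 seconds. Both are service targets,
 * not hard guarantees, hence "SOFT" above.
 */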

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define ON_HASH(drq)            (!hlist_unhashed(&(drq)->hash))
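
/*
 * The hash is keyed on a request's *end* sector (rq_hash_key), so a bio
 * whose first sector equals that key is a back merge candidate. For
 * example, a request covering sectors 100..107 (nr_sectors == 8) hashes
 * under key 108, and a bio starting at sector 108 finds it with one
 * bucket lookup instead of a tree walk. DL_HASH_BLOCK coarsens the key
 * by a factor of 8 before hashing, so nearby end sectors share buckets.
 */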

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rqs) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next request in sort order; the read entry, the write entry,
         * or both may be NULL
         */
        struct deadline_rq *next_drq[2];
        struct hlist_head *hash;        /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};

/*
 * pre-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct hlist_node hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        hlist_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && &drq->hash != head->first) {
                hlist_del(&drq->hash);
                hlist_add_head(&drq->hash, head);
        }
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct deadline_rq *drq;

        hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
                struct request *__rq = drq->request;

                BUG_ON(!ON_HASH(drq));

                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}
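
/*
 * Note the lazy cleanup above: requests that have become non-mergeable
 * are unhashed as a side effect of a lookup rather than eagerly, which
 * presumably keeps the common add/merge paths cheap.
 */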

/*
 * rb tree support functions
 */
#define RB_EMPTY(root)          ((root)->rb_node == NULL)
#define ON_RB(node)             (rb_parent(node) != node)
#define RB_CLEAR(node)          (rb_set_parent(node, node))
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        deadline_move_request(dd, __alias);
        goto retry;
}
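
/*
 * The sort tree cannot hold two requests with the same starting sector.
 * On a key collision, the request already in the tree (the alias) is
 * moved straight to the dispatch queue and the insertion is retried;
 * aliases are rare enough that dispatching one early is harmless.
 */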

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!ON_RB(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}
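
/*
 * deadline_find_first_drq dereferences the tree root without a NULL
 * check, so callers must only use it for a direction whose sort tree is
 * known to be non-empty; the dispatch path guarantees this via its
 * fifo_list emptiness checks and BUG_ON(RB_EMPTY(...)) assertions.
 */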

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
        deadline_del_drq_hash(drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}
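
/*
 * A front merge means the bio ends exactly where an existing request
 * begins, so the sort tree (keyed on starting sector) is searched for
 * the bio's end sector. Back merges go through the end-sector hash and
 * are always attempted; only front merging can be disabled, via the
 * front_merges tunable.
 */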

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}
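
/*
 * Inheriting the earlier of the two deadlines above ensures that merging
 * two queued requests never postpones service past what either request
 * was originally promised.
 */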

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * ddir fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}
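
/*
 * In short, dispatch proceeds in three steps: (1) if a sequential batch
 * is in progress and still under fifo_batch requests, keep feeding it;
 * (2) otherwise pick a data direction, preferring reads but yielding to
 * waiting writes once reads have been chosen writes_starved times in a
 * row; (3) within the chosen direction, serve the fifo head if its
 * deadline has passed, else continue in sector order, sweeping back to
 * the lowest sector when the tree runs out.
 */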

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return NULL;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return NULL;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                           mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return NULL;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR(&drq->rb_node);
                drq->request = rq;

                INIT_HLIST_NODE(&drq->hash);

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};
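
/*
 * With this elevator active, the attributes above appear under sysfs and
 * can be tuned at run time. A minimal sketch, assuming the scheduler is
 * selected for a hypothetical device sdX:
 *
 *      # echo deadline > /sys/block/sdX/queue/scheduler
 *      # cat /sys/block/sdX/queue/iosched/read_expire      (value in ms)
 *      # echo 250 > /sys/block/sdX/queue/iosched/read_expire
 *
 * The __CONV flag in the macros above converts between the jiffies the
 * scheduler works in and the milliseconds exposed to user space.
 */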

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");