/*
 *  linux/drivers/block/elevator.c
 *
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@suse.de> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * can we safely merge with this request?
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * different data direction, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * same device and no special stuff set, merge is ok
	 */
	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
	    !rq->waiting && !rq->special)
		return 1;

	return 0;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
EXPORT_SYMBOL(elv_try_merge);

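/*
 * Worked example (illustrative only): a request covering sectors
 * [100, 108) has ->sector == 100 and ->nr_sectors == 8.  A bio that
 * begins at sector 108 satisfies 100 + 8 == 108 and yields a back
 * merge; an 8-sector bio beginning at sector 92 satisfies
 * 100 - 8 == 92 and yields a front merge.  Anything else is
 * ELEVATOR_NO_MERGE.
 */
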
inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
{
	if (q->last_merge)
		return elv_try_merge(q->last_merge, bio);

	return ELEVATOR_NO_MERGE;
}
EXPORT_SYMBOL(elv_try_last_merge);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e = NULL;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);

		if (!strcmp(__e->elevator_name, name)) {
			e = __e;
			break;
		}
	}

	return e;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock_irq(&elv_list_lock);

	e = elevator_find(name);
	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock_irq(&elv_list_lock);

	return e;
}

static int elevator_attach(request_queue_t *q, struct elevator_type *e,
			   struct elevator_queue *eq)
{
	int ret = 0;

	memset(eq, 0, sizeof(*eq));
	eq->ops = &e->ops;
	eq->elevator_type = e;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->elevator = eq;
	q->end_sector = 0;
	q->boundary_rq = NULL;
	q->max_back_kb = 0;

	if (eq->ops->elevator_init_fn)
		ret = eq->ops->elevator_init_fn(q, eq);

	return ret;
}

static char chosen_elevator[16];

static void elevator_setup_default(void)
{
	struct elevator_type *e;

	/*
	 * check if default is set and exists
	 */
	if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) {
		elevator_put(e);
		return;
	}

#if defined(CONFIG_IOSCHED_AS)
	strcpy(chosen_elevator, "anticipatory");
#elif defined(CONFIG_IOSCHED_DEADLINE)
	strcpy(chosen_elevator, "deadline");
#elif defined(CONFIG_IOSCHED_CFQ)
	strcpy(chosen_elevator, "cfq");
#elif defined(CONFIG_IOSCHED_NOOP)
	strcpy(chosen_elevator, "noop");
#else
#error "You must build at least 1 IO scheduler into the kernel"
#endif
}

static int __init elevator_setup(char *str)
{
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 0;
}

__setup("elevator=", elevator_setup);

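/*
 * Usage sketch (illustrative): booting with "elevator=deadline" on the
 * kernel command line seeds chosen_elevator, so every queue that is
 * initialized without an explicit scheduler name picks deadline instead
 * of the compiled-in default above.
 */
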
int elevator_init(request_queue_t *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;

	elevator_setup_default();

	if (!name)
		name = chosen_elevator;

	e = elevator_get(name);
	if (!e)
		return -EINVAL;

	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
	if (!eq) {
		elevator_put(e);
		return -ENOMEM;
	}

	ret = elevator_attach(q, e, eq);
	if (ret) {
		kfree(eq);
		elevator_put(e);
	}

	return ret;
}

void elevator_exit(elevator_t *e)
{
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);

	elevator_put(e->elevator_type);
	e->elevator_type = NULL;
	kfree(e);
}

/*
 * Insert rq at the tail of the dispatch queue of q.  Queue lock must
 * be held on entry.  To be used by specific elevators.
 */
void elv_dispatch_insert_tail(request_queue_t *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	/* a tail insertion becomes the new scheduling boundary */
	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;

	list_add_tail(&rq->queuelist, &q->queue_head);
}

/*
 * Insert rq into the dispatch queue of q, sort-inserted so that the
 * head keeps moving one way from the current scheduling boundary.
 * Queue lock must be held on entry.  To be used by specific elevators.
 */
void elv_dispatch_sort(request_queue_t *q, struct request *rq)
{
	sector_t boundary;
	unsigned max_back;
	struct list_head *entry;

	boundary = q->end_sector;
	max_back = q->max_back_kb * 2;
	boundary = boundary > max_back ? boundary - max_back : 0;

	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}

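/*
 * Worked example (illustrative): with q->end_sector == 500 and
 * q->max_back_kb == 0, the boundary is sector 500.  Requests at or
 * beyond sector 500 stay sorted ascending ahead of requests below it,
 * so the head finishes its forward sweep from the last dispatched
 * sector before wrapping back to low sectors - a classic one-way
 * elevator.
 */
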
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq);
}

void elv_merge_requests(request_queue_t *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (q->last_merge == next)
		q->last_merge = NULL;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);
}

void elv_requeue_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
			e->ops->elevator_deactivate_req_fn(q, rq);
	}

	rq->flags &= ~REQ_STARTED;

	/*
	 * if this is a barrier flush, requeue the original request
	 * instead and drop the flush
	 */
	if (rq->flags & REQ_BAR_FLUSH) {
		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
		rq = rq->end_io_data;
	}

	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}

void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is a scheduling boundary, update end_sector
		 */
		if (blk_fs_request(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	}

	if (plug)
		blk_plug_device(q);

	rq->q = q;

	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
		/*
		 * if drain is set, store the request "locally". when the drain
		 * is finished, the requests will be handed ordered to the io
		 * scheduler
		 */
		list_add_tail(&rq->queuelist, &q->drain_list);
		return;
	}

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->flags |= REQ_SOFTBARRIER;

		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
			;
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		q->request_fn(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		rq->flags |= REQ_SORTED;
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __FUNCTION__, where);
		BUG();
	}

	if (blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			  - q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

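/*
 * Usage sketch (illustrative): callers pick the insertion point to
 * match the request's role, e.g.
 *
 *	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);	requeue
 *	__elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0);	normal fs rw
 *
 * as elv_requeue_request() above does for the front case.
 */
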
void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static inline struct request *__elv_next_request(request_queue_t *q)
{
	struct request *rq;

	if (unlikely(list_empty(&q->queue_head) &&
		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
		return NULL;

	rq = list_entry_rq(q->queue_head.next);

	/*
	 * if this is a barrier write and the device has to issue a
	 * flush sequence to support it, check how far we are
	 */
	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);

		if (q->ordered == QUEUE_ORDERED_FLUSH &&
		    !blk_barrier_preflush(rq))
			rq = blk_start_pre_flush(q, rq);
	}

	return rq;
}

struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->flags & REQ_STARTED)) {
			elevator_t *e = q->elevator;

			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq) &&
			    e->ops->elevator_activate_req_fn)
				e->ops->elevator_activate_req_fn(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->flags |= REQ_STARTED;
		}

		if (rq == q->last_merge)
			q->last_merge = NULL;

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			blkdev_dequeue_request(rq);
			rq->flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
			break;
		}
	}

	return rq;
}

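/*
 * Usage sketch (illustrative, hypothetical "foo" driver): a typical
 * request_fn drains the queue like this, completing each request later
 * (e.g. from its interrupt handler) with end_that_request_chunk() and
 * end_that_request_last():
 *
 *	static void foo_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			blkdev_dequeue_request(rq);
 *			foo_issue_to_hw(rq);
 *		}
 *	}
 */
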
void elv_dequeue_request(request_queue_t *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and until it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;

	/*
	 * the main clearing point for q->last_merge is on retrieval of
	 * request by driver (it calls elv_next_request()), but it _can_
	 * also happen here if a request is added to the queue but later
	 * deleted without ever being given to the driver (merged with
	 * another request).
	 */
	if (rq == q->last_merge)
		q->last_merge = NULL;
}

int elv_queue_empty(request_queue_t *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}

struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	struct list_head *next;

	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);

	next = rq->queuelist.next;
	if (next != &q->queue_head && next != &rq->queuelist)
		return list_entry_rq(next);

	return NULL;
}

struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	struct list_head *prev;

	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);

	prev = rq->queuelist.prev;
	if (prev != &q->queue_head && prev != &rq->queuelist)
		return list_entry_rq(prev);

	return NULL;
}

int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		    int gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(q, rq);
}

int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw, bio);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}

int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	e->kobj.parent = kobject_get(&q->kobj);
	if (!e->kobj.parent)
		return -EBUSY;

	snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
	e->kobj.ktype = e->elevator_type->elevator_ktype;

	return kobject_register(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		elevator_t *e = q->elevator;
		kobject_unregister(&e->kobj);
		kobject_put(&q->kobj);
	}
}

int elv_register(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	if (elevator_find(e->elevator_name))
		BUG();
	list_add_tail(&e->list, &elv_list);
	spin_unlock_irq(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
	if (!strcmp(e->elevator_name, chosen_elevator))
		printk(" (default)");
	printk("\n");
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	spin_lock_irq(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock_irq(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

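/*
 * Usage sketch (illustrative, hypothetical "foo" scheduler): an io
 * scheduler module fills in an elevator_type and registers it on load.
 * Only a few of the elevator_ops hooks are shown; all names other than
 * the elv_register()/elv_unregister() pair are made up for the example:
 *
 *	static struct elevator_type iosched_foo = {
 *		.ops = {
 *			.elevator_merge_fn	= foo_merge,
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		elv_unregister(&iosched_foo);
 *	}
 */
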
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason. we also do an
 * intermediate switch to noop to ensure safety with stack-allocated
 * requests, since they don't originate from the block layer allocator.
 * noop is safe here, because it never needs to touch the elevator itself
 * for completion events. DRAIN flags will make sure we don't touch it
 * for additions either.
 */
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
	struct elevator_type *noop_elevator = NULL;
	elevator_t *old_elevator;

	if (!e)
		goto error;

	/*
	 * first step, drain requests from the block freelist
	 */
	blk_wait_queue_drained(q, 0);

	/*
	 * unregister old elevator data
	 */
	elv_unregister_queue(q);
	old_elevator = q->elevator;

	/*
	 * next step, switch to noop since it uses no private rq structures
	 * and doesn't allocate any memory for anything. then wait for any
	 * non-fs requests in-flight
	 */
	noop_elevator = elevator_get("noop");
	spin_lock_irq(q->queue_lock);
	elevator_attach(q, noop_elevator, e);
	spin_unlock_irq(q->queue_lock);

	blk_wait_queue_drained(q, 1);

	/*
	 * attach and start new elevator
	 */
	if (elevator_attach(q, new_e, e))
		goto fail;

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and start queue again
	 */
	elevator_exit(old_elevator);
	blk_finish_queue_drain(q);
	elevator_put(noop_elevator);
	return;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
fail:
	q->elevator = old_elevator;
	elv_register_queue(q);
	blk_finish_queue_drain(q);
error:
	if (noop_elevator)
		elevator_put(noop_elevator);
	elevator_put(new_e);
	printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
}

ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	memset(elevator_name, 0, sizeof(elevator_name));
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);

	if (elevator_name[strlen(elevator_name) - 1] == '\n')
		elevator_name[strlen(elevator_name) - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	elevator_switch(q, e);
	return count;
}

ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	int len = 0;

	spin_lock_irq(q->queue_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock_irq(q->queue_lock);

	len += sprintf(name+len, "\n");
	return len;
}

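/*
 * Usage sketch (illustrative): these two hooks back the per-queue
 * "scheduler" sysfs attribute, so from userspace (device name and
 * output are examples):
 *
 *	# cat /sys/block/hda/queue/scheduler
 *	noop [anticipatory] deadline cfq
 *	# echo deadline > /sys/block/hda/queue/scheduler
 */
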
EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);