/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * Don't merge sanitize requests
	 */
	if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

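/*
 * Look up a registered elevator type by name.  Caller must hold
 * elv_list_lock; returns NULL if no matching scheduler is registered.
 */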
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

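/*
 * Find an elevator type by name and take a module reference on it,
 * loading the "<name>-iosched" module on demand if it isn't registered
 * yet.  Returns NULL if the scheduler can't be found or pinned.
 */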
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static int elevator_init_queue(struct request_queue *q,
			       struct elevator_queue *eq)
{
	eq->elevator_data = eq->type->ops.elevator_init_fn(q);
	if (eq->elevator_data)
		return 0;
	return -ENOMEM;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

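/*
 * Allocate and initialise an elevator_queue for @q: take over the type
 * reference handed in by the caller, set up the kobject, the sysfs lock
 * and the back-merge hash.  On failure the type reference is dropped
 * and NULL is returned.
 */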
static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e->hash);
	kfree(e);
}

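/*
 * Attach an I/O scheduler to @q at queue init time.  Selection order:
 * the explicit @name, then the boot-time "elevator=" choice, then
 * CONFIG_DEFAULT_IOSCHED, finally falling back to noop.
 */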
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	err = elevator_init_queue(q, eq);
	if (err) {
		kobject_put(&eq->kobj);
		return err;
	}

	q->elevator = eq;
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

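/*
 * Back-merge hash helpers.  Requests are hashed on rq_hash_key() (their
 * end sector) so that elv_merge() can cheaply find a request that a new
 * bio could be appended to.
 */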
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

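/*
 * Try to find a request that @bio can be merged with.  Honours the
 * nomerges/noxmerges queue flags, then checks the one-hit cache
 * (q->last_merge), the back-merge hash and finally the elevator's own
 * merge hook.  Returns an ELEVATOR_*_MERGE type and sets *req on success.
 */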
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

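/*
 * Called after a bio has been merged into @rq.  Let the elevator update
 * its bookkeeping, rehash the request if its end sector changed (back
 * merge) and remember it as the one-hit merge cache.
 */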
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

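/*
 * Two requests have been merged: @next has been folded into @rq.  Give
 * the elevator a chance to clean up @next, rehash @rq, and drop @next
 * from the hash and the sorted count if it was sorted.
 */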
void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

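/*
 * Put a request that was already handed to the driver back onto the
 * dispatch list, fixing up the in-flight accounting and clearing
 * REQ_STARTED so it can be issued again.
 */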
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

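/*
 * Force the elevator to dispatch everything it is holding onto the
 * dispatch list.  Complains (at most a few times) if the scheduler
 * still claims sorted requests afterwards.
 */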
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_drain_queue(q, false);
}

void elv_quiesce_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);
}

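/*
 * Core insertion path.  Queue lock must be held.  Depending on @where
 * the request is put at the front or back of the dispatch list, handed
 * to the elevator for sorting, attempted as an insertion merge, or
 * routed through the flush machinery.
 */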
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
			(rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

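/*
 * Called when the driver has finished with @rq: update the in-flight
 * count and notify the elevator for requests it had sorted.
 */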
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
{
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}

int elv_register_queue(struct request_queue *q)
{
	return __elv_register_queue(q, q->elevator);
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

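/*
 * Register a new elevator type.  Creates the per-type io_cq slab cache
 * when the scheduler asks for one, rejects duplicate names, and logs
 * which scheduler (if any) is the default.
 */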
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	int err;

	/* allocate new elevator */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	err = elevator_init_queue(q, e);
	if (err) {
		kobject_put(&e->kobj);
		return err;
	}

	/* turn on BYPASS and drain all requests w/ elevator private data */
	elv_quiesce_start(q);

	/* unregister old queue, register new one and kill old elevator */
	if (q->elevator->registered) {
		elv_unregister_queue(q);
		err = __elv_register_queue(q, e);
		if (err)
			goto fail_register;
	}

	/* done, clear io_cq's, switch elevators and turn off BYPASS */
	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	old_elevator = q->elevator;
	q->elevator = e;
	spin_unlock_irq(q->queue_lock);

	elevator_exit(old_elevator);
	elv_quiesce_end(q);

	blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	elv_register_queue(q);
	elv_quiesce_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

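/*
 * sysfs "scheduler" read side: list every registered elevator, with the
 * currently active one in square brackets, or "none" for queues that
 * don't use an elevator.
 */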
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);