/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
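
/*
 * Note: the hash key is a request's *end* sector, so
 * elv_rqhash_find(q, sector) returns a request that ends exactly where
 * a bio starting at 'sector' begins - i.e. a back-merge candidate
 * (see elv_merge() below).
 */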

/*
 * Query the io scheduler to see if the bio the current process is
 * issuing may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
                return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static bool elevator_match(const struct elevator_type *e, const char *name)
{
        if (!strcmp(e->elevator_name, name))
                return true;
        if (e->elevator_alias && !strcmp(e->elevator_alias, name))
                return true;

        return false;
}

/*
 * Return the scheduler with name 'name' and with matching 'mq' capability.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (elevator_match(e, name) && (mq == e->uses_mq))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
                                          const char *name, bool try_loading)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name, q->mq_ops != NULL);
        if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name, q->mq_ops != NULL);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);
        return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);
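
/*
 * Usage example (illustrative): booting with "elevator=deadline" on the
 * kernel command line stores "deadline" in chosen_elevator, and legacy
 * (non-mq) queues will then try to use that scheduler by default.
 */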

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
        struct elevator_type *e;

        if (!chosen_elevator[0])
                return;

        /*
         * The boot parameter is deprecated and has never been supported
         * for MQ. Only look for non-mq schedulers from here.
         */
        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator, false);
        spin_unlock(&elv_list_lock);

        if (!e)
                request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);
        eq->uses_mq = e->uses_mq;

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        int err;

        /*
         * q->sysfs_lock must be held to provide mutual exclusion between
         * elevator_switch() and here.
         */
        lockdep_assert_held(&q->sysfs_lock);

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(q, name, true);
                if (!e)
                        return -EINVAL;
        }

        /*
         * Use the default elevator specified by the boot parameter for
         * non-mq devices, or by the config option. Don't try to load
         * modules as we could be running off async and request_module()
         * isn't allowed from async.
         */
        if (!e && !q->mq_ops && *chosen_elevator) {
                e = elevator_get(q, chosen_elevator, false);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }

        if (!e) {
                /*
                 * For blk-mq devices with a single queue, we default to
                 * mq-deadline, if available. If mq-deadline isn't
                 * available OR we have multiple queues, default to
                 * "none".
                 */
                if (q->mq_ops) {
                        if (q->nr_hw_queues == 1)
                                e = elevator_get(q, "mq-deadline", false);
                        if (!e)
                                return 0;
                } else
                        e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);

                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. Using noop.\n");
                        e = elevator_get(q, "noop", false);
                }
        }

        if (e->uses_mq)
                err = blk_mq_init_sched(q, e);
        else
                err = e->ops.sq.elevator_init_fn(q, e);
        if (err)
                elevator_put(e);
        return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
                blk_mq_exit_sched(q, e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
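
/*
 * Illustrative use (a sketch, not part of this file): a scheduler such
 * as deadline typically keeps one sector-sorted tree per data direction
 * and calls these helpers from its add/remove hooks, e.g.:
 *
 *	elv_rb_add(&dd->sort_list[rq_data_dir(rq)], rq);
 *	...
 *	__rq = elv_rb_find(&dd->sort_list[data_dir], sector);
 *
 * where 'dd' stands for the scheduler's private data (hypothetical
 * name here).
 */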

/*
 * Insert rq into the dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
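        /*
         * Scan the dispatch list backwards for the insertion point.
         * Requests at or beyond 'boundary' (the last dispatched sector)
         * sort ahead of those that have wrapped back before it,
         * mimicking a one-way elevator sweep; started requests and
         * barriers are never passed.
         */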
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if (req_op(rq) != req_op(pos))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
                        break;
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

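/*
 * elv_merge() reports whether and how a bio can be merged into an
 * existing request, e.g. ELEVATOR_BACK_MERGE when the bio begins where
 * a request ends, or ELEVATOR_FRONT_MERGE for the converse; on a merge
 * hit, *req is pointed at the candidate request.
 */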
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *	nomerges:  No merges at all attempted
         *	noxmerges: Only simple one-hit cache try
         *	merges:	   All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->uses_mq && e->type->ops.mq.request_merge)
                return e->type->ops.mq.request_merge(q, req, bio);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
                return e->type->ops.sq.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.request_merged)
                e->type->ops.mq.request_merged(q, rq, type);
        else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
                e->type->ops.sq.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;
        bool next_sorted = false;

        if (e->uses_mq && e->type->ops.mq.requests_merged)
                e->type->ops.mq.requests_merged(q, rq, next);
        else if (e->type->ops.sq.elevator_merge_req_fn) {
                next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
                if (next_sorted)
                        e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
        }

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
                        struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_bio_merged_fn)
                e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
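/*
 * Runtime-PM bookkeeping: q->nr_pending counts requests that should
 * keep the device awake.  Requests marked RQF_PM are issued by the PM
 * core itself and are deliberately excluded, so they can still reach a
 * suspending/suspended device.
 */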
static void blk_pm_requeue_request(struct request *rq)
{
        if (rq->q->dev && !(rq->rq_flags & RQF_PM))
                rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
        if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
            (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
                pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
                                      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->rq_flags & RQF_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->rq_flags &= ~RQF_STARTED;

        blk_pm_requeue_request(rq);

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        static int printed;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        lockdep_assert_held(q->queue_lock);

        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        blk_pm_add_request(q, rq);

        rq->q = q;

        if (rq->rq_flags & RQF_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (!blk_rq_is_passthrough(rq)) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
                    (where == ELEVATOR_INSERT_SORT ||
                     where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->rq_flags |= RQF_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->rq_flags |= RQF_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
                /* fall through */
        case ELEVATOR_INSERT_SORT:
                BUG_ON(blk_rq_is_passthrough(rq));
                rq->rq_flags |= RQF_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->rq_flags |= RQF_SOFTBARRIER;
                blk_insert_flush(rq);
                break;
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.next_request)
                return e->type->ops.mq.next_request(q, rq);
        else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
                return e->type->ops.sq.elevator_latter_req_fn(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->uses_mq && e->type->ops.mq.former_request)
                return e->type->ops.mq.former_request(q, rq);
        if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
                return e->type->ops.sq.elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_set_req_fn)
                return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        if (e->type->ops.sq.elevator_put_req_fn)
                e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return 0;

        if (e->type->ops.sq.elevator_may_queue_fn)
                return e->type->ops.sq.elevator_may_queue_fn(q, op);

        return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (WARN_ON_ONCE(e->uses_mq))
                return;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->rq_flags & RQF_SORTED) &&
                    e->type->ops.sq.elevator_completed_req_fn)
                        e->type->ops.sq.elevator_completed_req_fn(q, rq);
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
                if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
                        e->type->ops.sq.elevator_registered_fn(q);
        }
        return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
                /* Re-enable throttling in case elevator disabled it */
                wbt_enable_default(q);
        }
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name, e->uses_mq)) {
                spin_unlock(&elv_list_lock);
                if (e->icq_cache)
                        kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (elevator_match(e, chosen_elevator) ||
                        (!*chosen_elevator &&
                         elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
                                                                def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
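
/*
 * Illustrative registration pattern (a sketch, not from this file): an
 * I/O scheduler module fills in a static struct elevator_type and calls
 * elv_register() from its module_init() hook, e.g.:
 *
 *	static struct elevator_type iosched_foo = {	// hypothetical
 *		.ops.sq		= { ... },
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 */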

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

static int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e)
{
        int ret;

        blk_mq_freeze_queue(q);

        if (q->elevator) {
                if (q->elevator->registered)
                        elv_unregister_queue(q);
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out;

        if (new_e) {
                ret = elv_register_queue(q);
                if (ret) {
                        elevator_exit(q, q->elevator);
                        goto out;
                }
        }

        if (new_e)
                blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
        else
                blk_add_trace_msg(q, "elv switch: none");

out:
        blk_mq_unfreeze_queue(q);
        return ret;
}

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks - we don't free the old io scheduler before we have
 * allocated what we need for the new one.  This way we have a chance
 * of going back to the old one, if the new one fails init for some
 * reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool old_registered = false;
        int err;

        if (q->mq_ops)
                return elevator_switch_mq(q, new_e);

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        if (old) {
                old_registered = old->registered;

                blk_queue_bypass_start(q);

                /* unregister and clear all auxiliary data of the old elevator */
                if (old_registered)
                        elv_unregister_queue(q);

                ioc_clear_queue(q);
        }

        /* allocate, init and register new elevator */
        err = new_e->ops.sq.elevator_init_fn(q, new_e);
        if (err)
                goto fail_init;

        err = elv_register_queue(q);
        if (err)
                goto fail_register;

        /* done, kill the old one and finish */
        if (old) {
                elevator_exit(q, old);
                blk_queue_bypass_end(q);
        }

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

        return 0;

fail_register:
        elevator_exit(q, q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        if (old) {
                q->elevator = old;
                elv_register_queue(q);
                blk_queue_bypass_end(q);
        }

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        /* Make sure queue is not in the middle of being removed */
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return -ENOENT;

        /*
         * Special case for mq, turn off scheduling
         */
        if (q->mq_ops && !strncmp(name, "none", 4))
                return elevator_switch(q, NULL);

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(q, strstrip(elevator_name), true);
        if (!e)
                return -EINVAL;

        if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}

static inline bool elv_support_iosched(struct request_queue *q)
{
        if (q->mq_ops && q->tag_set && (q->tag_set->flags &
                                BLK_MQ_F_NO_SCHED))
                return false;
        return true;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
                return count;

        ret = __elevator_change(q, name);
        if (!ret)
                return count;

        return ret;
}
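
/*
 * These store/show hooks back the per-queue "scheduler" sysfs file,
 * e.g. (illustrative shell usage; output depends on the built
 * schedulers):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */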

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = NULL;
        struct elevator_type *__e;
        bool uses_mq = q->mq_ops != NULL;
        int len = 0;

        if (!queue_is_rq_based(q))
                return sprintf(name, "none\n");

        if (!q->elevator)
                len += sprintf(name+len, "[none] ");
        else
                elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (elv && elevator_match(elv, __e->elevator_name) &&
                    (__e->uses_mq == uses_mq)) {
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                        continue;
                }
                if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
                        len += sprintf(name+len, "%s ", __e->elevator_name);
                else if (!__e->uses_mq && !q->mq_ops)
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        if (q->mq_ops && q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);