/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 *   Removed tests for max-bomb-segments, which was breaking elvtune
 *   when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

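/*
 * Check whether 'name' refers to this scheduler, either by its primary
 * name or by its optional alias.
 */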
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	if (!strcmp(e->elevator_name, name))
		return true;
	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
		return true;

	return false;
}

/*
 * Return the scheduler with name 'name' and a matching 'mq' capability.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (elevator_match(e, name) && (mq == e->uses_mq))
			return e;
	}

	return NULL;
}

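/* Drop the module reference taken when the scheduler was looked up */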
static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

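/*
 * Look up a registered scheduler matching both 'name' and the queue's mq
 * mode, and take a reference on its module.  If 'try_loading' is set and
 * no such scheduler is registered, try to load its module and look again.
 */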
static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name, q->mq_ops != NULL);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name, q->mq_ops != NULL);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	/*
	 * The boot parameter is deprecated and has never been supported
	 * for MQ, so only look for non-mq schedulers here.
	 */
	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator, false);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

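/*
 * Allocate an elevator_queue for 'q' and initialize its kobject, merge
 * hash and sysfs lock.  The caller passes the scheduler type to attach.
 */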
struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

/*
 * Use the default elevator specified by config boot param for non-mq devices,
 * or by config option.  Don't try to load modules as we could be running off
 * async and request_module() isn't allowed from async.
 */
int elevator_init(struct request_queue *q)
{
	struct elevator_type *e = NULL;
	int err = 0;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	mutex_lock(&q->sysfs_lock);
	if (unlikely(q->elevator))
		goto out_unlock;

	if (*chosen_elevator) {
		e = elevator_get(q, chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e)
		e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
	if (!e) {
		printk(KERN_ERR
			"Default I/O scheduler not found. Using noop.\n");
		e = elevator_get(q, "noop", false);
	}

	err = e->ops.sq.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
out_unlock:
	mutex_unlock(&q->sysfs_lock);
	return err;
}

void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		blk_mq_exit_sched(q, e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}

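/*
 * Merge hash: a request is hashed on the sector just past its end
 * (rq_hash_key()), so looking up a bio's first sector finds requests
 * that the bio could be back merged into.
 */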
static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

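/*
 * Called after a bio has been merged into rq so that the elevator can
 * update its state; a back merge moves rq's end sector, so rq is also
 * repositioned in the merge hash.
 */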
void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

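/*
 * Called when 'next' has been merged into 'rq' and is about to be freed;
 * notify the elevator and drop next's merge-hash and sort accounting.
 */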
void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

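/*
 * Put a dequeued request back on the dispatch queue, undoing the
 * in-flight accounting that was done when it was started.
 */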
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

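/*
 * Core insertion path: depending on 'where', rq goes straight onto the
 * dispatch list, into the flush machinery, or to the elevator's
 * sort/merge hooks.  The queue lock must be held; elv_add_request()
 * below is the locking wrapper.
 */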
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (!blk_rq_is_passthrough(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now. As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything. There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* fall through */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(blk_rq_is_passthrough(rq));
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

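/*
 * Create the "iosched" kobject under the queue's sysfs directory and
 * populate it with the elevator's attributes.
 */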
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	lockdep_assert_held(&q->sysfs_lock);

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}

void elv_unregister_queue(struct request_queue *q)
{
	lockdep_assert_held(&q->sysfs_lock);

	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
		/* Re-enable throttling in case elevator disabled it */
		wbt_enable_default(q);
	}
}

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name, e->uses_mq)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (elevator_match(e, chosen_elevator) ||
	    (!*chosen_elevator &&
	     elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
	       def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

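/*
 * Switch a blk-mq queue to new_e (or to "none" when new_e is NULL),
 * tearing down the old scheduler first.  Caller must hold q->sysfs_lock.
 */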
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e)
{
	int ret;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->elevator) {
		if (q->elevator->registered)
			elv_unregister_queue(q);
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		ret = elv_register_queue(q);
		if (ret) {
			elevator_exit(q, q->elevator);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	return ret;
}

/*
 * For blk-mq devices, we default to using mq-deadline, if available, for single
 * queue devices. If deadline isn't available OR we have multiple queues,
 * default to "none".
 */
int elevator_init_mq(struct request_queue *q)
{
	struct elevator_type *e;
	int err = 0;

	if (q->nr_hw_queues != 1)
		return 0;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	mutex_lock(&q->sysfs_lock);
	if (unlikely(q->elevator))
		goto out_unlock;

	e = elevator_get(q, "mq-deadline", false);
	if (!e)
		goto out_unlock;

	err = blk_mq_init_sched(q, e);
	if (err)
		elevator_put(e);
out_unlock:
	mutex_unlock(&q->sysfs_lock);
	return err;
}

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	lockdep_assert_held(&q->sysfs_lock);

	if (q->mq_ops) {
		blk_mq_freeze_queue(q);
		blk_mq_quiesce_queue(q);

		err = elevator_switch_mq(q, new_e);

		blk_mq_unquiesce_queue(q);
		blk_mq_unfreeze_queue(q);

		return err;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		ioc_clear_queue(q);
	}

	/* allocate, init and register new elevator */
	err = new_e->ops.sq.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	err = elv_register_queue(q);
	if (err)
		goto fail_register;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(q, old);
		blk_queue_bypass_end(q);
	}

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q, q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		blk_queue_bypass_end(q);
	}

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/* Make sure queue is not in the middle of being removed */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return -ENOENT;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(q, strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

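/*
 * A blk-mq driver can set BLK_MQ_F_NO_SCHED on its tag set to opt out of
 * I/O scheduling entirely.
 */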
static inline bool elv_support_iosched(struct request_queue *q)
{
	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
				BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}

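/*
 * List the schedulers available to this queue for the sysfs "scheduler"
 * attribute, with the active one in square brackets.
 */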
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	bool uses_mq = q->mq_ops != NULL;
	int len = 0;

	if (!queue_is_rq_based(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && elevator_match(elv, __e->elevator_name) &&
		    (__e->uses_mq == uses_mq)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);