/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

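/*
 * The hash key is a request's *end* sector, so a bio whose start sector
 * equals an existing request's end sector hashes to the same bucket --
 * exactly the back-merge case. Illustrative lookup (a sketch of what
 * elv_merge() below already does, not additional API):
 *
 *	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 *	if (__rq && elv_bio_merge_ok(__rq, bio))
 *		... bio can be appended to __rq ...
 */
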
/*
 * Query the io scheduler to see if the bio the current process is
 * issuing may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

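/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * stores "deadline" in chosen_elevator; legacy (non-mq) queues pick it
 * up in elevator_init() below, and load_default_elevator_module() makes
 * sure a modular scheduler is loaded early enough.
 */
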
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by the boot param for
	 * non-mq devices, or by config option. Don't try to load modules
	 * as we could be running off async and request_module() isn't
	 * allowed from async.
	 */
	if (!e && !q->mq_ops && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		/*
		 * For blk-mq devices, we default to using mq-deadline,
		 * if available, for single queue devices. If deadline
		 * isn't available OR we have multiple queues, default
		 * to "none".
		 */
		if (q->mq_ops) {
			if (q->nr_hw_queues == 1)
				e = elevator_get("mq-deadline", false);
			if (!e)
				return 0;
		} else
			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);

		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	if (e->uses_mq)
		err = blk_mq_init_sched(q, e);
	else
		err = e->ops.sq.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
	return err;
}
EXPORT_SYMBOL(elevator_init);

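/*
 * Summary of the default selection in elevator_init() when no explicit
 * name is passed in:
 *
 *	legacy queue:	chosen_elevator (boot param), then
 *			CONFIG_DEFAULT_IOSCHED, then "noop" as last resort
 *	blk-mq queue:	"mq-deadline" if available and there is a single
 *			hw queue, otherwise no scheduler ("none")
 */
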
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		blk_mq_exit_sched(q, e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
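
/*
 * Typical use of the rb-tree helpers by a sq elevator (a sketch along
 * the lines of deadline-iosched; "dd" and its sort_list are that
 * scheduler's private data, not something defined here):
 *
 *	elv_rb_add(&dd->sort_list[data_dir], rq);	    on insert
 *	__rq = elv_rb_find(&dd->sort_list[WRITE], sector);  front merge probe
 *	elv_rb_del(&dd->sort_list[data_dir], rq);	    on removal
 */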

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
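
/*
 * Note on the boundary handling above: q->end_sector tracks where the
 * last dispatched request ended, and requests behind that boundary sort
 * after those in front of it, keeping the dispatch list one sweep in
 * the current service direction.
 */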

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

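/*
 * elv_merge() thus tries, in order: the one-hit last_merge cache, the
 * back-merge hash, and finally the scheduler's own merge hook (which
 * can e.g. find front merges via elv_rb_find() on its sort tree).
 */
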
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.request_merged)
		e->type->ops.mq.request_merged(q, rq, type);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
		e->type->ops.sq.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	bool next_sorted = false;

	if (e->uses_mq && e->type->ops.mq.requests_merged)
		e->type->ops.mq.requests_merged(q, rq, next);
	else if (e->type->ops.sq.elevator_merge_req_fn) {
		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
		if (next_sorted)
			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
	}

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_bio_merged_fn)
		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}

#ifdef CONFIG_PM
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->rq_flags & RQF_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->rq_flags &= ~RQF_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	static int printed;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	lockdep_assert_held(q->queue_lock);

	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->rq_flags & RQF_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (!blk_rq_is_passthrough(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->rq_flags |= RQF_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* fall through */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(blk_rq_is_passthrough(rq));
		rq->rq_flags |= RQF_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->rq_flags |= RQF_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
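
/*
 * Illustrative caller (a sketch, not a new requirement): queueing a
 * request at the back of the dispatch list from driver context is
 *
 *	elv_add_request(q, rq, ELEVATOR_INSERT_BACK);
 *
 * which takes the queue lock, drains the elevator and appends rq to
 * q->queue_head, as implemented in __elv_add_request() above.
 */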

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.next_request)
		return e->type->ops.mq.next_request(q, rq);
	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
		return e->type->ops.sq.elevator_latter_req_fn(q, rq);

	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.former_request)
		return e->type->ops.mq.former_request(q, rq);
	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
		return e->type->ops.sq.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_set_req_fn)
		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	if (e->type->ops.sq.elevator_put_req_fn)
		e->type->ops.sq.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, unsigned int op)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return 0;

	if (e->type->ops.sq.elevator_may_queue_fn)
		return e->type->ops.sq.elevator_may_queue_fn(q, op);

	return ELV_MQUEUE_MAY;
}

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (WARN_ON_ONCE(e->uses_mq))
		return;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->rq_flags & RQF_SORTED) &&
		    e->type->ops.sq.elevator_completed_req_fn)
			e->type->ops.sq.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

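/*
 * The sysfs glue above lets a sq elevator expose tunables by supplying
 * an elv_fs_entry table; a minimal sketch (names are hypothetical):
 *
 *	static struct elv_fs_entry example_attrs[] = {
 *		__ATTR(example_tunable, S_IRUGO|S_IWUSR,
 *		       example_show, example_store),
 *		__ATTR_NULL
 *	};
 *
 * hooked up via .elevator_attrs in its struct elevator_type and
 * published by elv_register_queue() below.
 */
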
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
			e->type->ops.sq.elevator_registered_fn(q);
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
		/* Re-enable throttling in case elevator disabled it */
		wbt_enable_default(q);
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
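
/*
 * A scheduler module pairs these at load/unload time (a sketch assuming
 * a hypothetical "elevator_example" elevator_type):
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&elevator_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&elevator_example);
 *	}
 */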

static int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e)
{
	int ret;

	blk_mq_freeze_queue(q);

	if (q->elevator) {
		if (q->elevator->registered)
			elv_unregister_queue(q);
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	ret = blk_mq_init_sched(q, new_e);
	if (ret)
		goto out;

	if (new_e) {
		ret = elv_register_queue(q);
		if (ret) {
			elevator_exit(q, q->elevator);
			goto out;
		}
	}

	if (new_e)
		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
	else
		blk_add_trace_msg(q, "elv switch: none");

out:
	blk_mq_unfreeze_queue(q);
	return ret;
}

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool old_registered = false;
	int err;

	if (q->mq_ops)
		return elevator_switch_mq(q, new_e);

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	if (old) {
		old_registered = old->registered;

		blk_queue_bypass_start(q);

		/* unregister and clear all auxiliary data of the old elevator */
		if (old_registered)
			elv_unregister_queue(q);

		ioc_clear_queue(q);
	}

	/* allocate, init and register new elevator */
	err = new_e->ops.sq.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	err = elv_register_queue(q);
	if (err)
		goto fail_register;

	/* done, kill the old one and finish */
	if (old) {
		elevator_exit(q, old);
		blk_queue_bypass_end(q);
	}

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q, q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	if (old) {
		q->elevator = old;
		elv_register_queue(q);
		blk_queue_bypass_end(q);
	}

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	/*
	 * Special case for mq, turn off scheduling
	 */
	if (q->mq_ops && !strncmp(name, "none", 4))
		return elevator_switch(q, NULL);

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e)
		return -EINVAL;

	if (q->elevator &&
	    !strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	if (!e->uses_mq && q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}
	if (e->uses_mq && !q->mq_ops) {
		elevator_put(e);
		return -EINVAL;
	}

	return elevator_switch(q, e);
}

static inline bool elv_support_iosched(struct request_queue *q)
{
	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
				BLK_MQ_F_NO_SCHED))
		return false;
	return true;
}

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	return ret;
}

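/*
 * elv_iosched_store() and elv_iosched_show() back the per-queue sysfs
 * scheduler file, so a switch from userspace looks like:
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop [deadline] cfq
 *
 * (the exact list depends on which schedulers are built in or loaded;
 * the active one is bracketed).
 */
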
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv = NULL;
	struct elevator_type *__e;
	int len = 0;

	if (!blk_queue_stackable(q))
		return sprintf(name, "none\n");

	if (!q->elevator)
		len += sprintf(name+len, "[none] ");
	else
		elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
			continue;
		}
		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
			len += sprintf(name+len, "%s ", __e->elevator_name);
		else if (!__e->uses_mq && !q->mq_ops)
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	if (q->mq_ops && q->elevator)
		len += sprintf(name+len, "none");

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);