/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash: requests are hashed on the sector immediately following
 * them, so a bio starting where a request ends can be found as a back
 * merge candidate.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

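/*
 * Worked example (illustrative only): a request starting at sector 100
 * with blk_rq_sectors() == 8 hashes under key 108, so a bio whose first
 * sector is 108 finds it as a back merge candidate through
 * elv_rqhash_find(q, 108).
 */
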
/*
 * Query the io scheduler to see if a bio issued by the current process
 * may be merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static bool elevator_match(const struct elevator_type *e, const char *name)
{
	if (!strcmp(e->elevator_name, name))
		return true;
	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
		return true;

	return false;
}
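
/*
 * Example (assumed registration, for illustration): mq-deadline also
 * registers the alias "deadline", so on a blk-mq queue both
 * elevator_match(e, "mq-deadline") and elevator_match(e, "deadline")
 * select the same scheduler.
 */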

/*
 * Return the scheduler with name 'name' and with matching 'mq' capability.
 */
static struct elevator_type *elevator_find(const char *name, bool mq)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (elevator_match(e, name) && (mq == e->uses_mq))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(struct request_queue *q,
					  const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name, q->mq_ops != NULL);
	if (!e && try_loading) {
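		/*
		 * request_module() may sleep, so the elevator list
		 * spinlock must be dropped around the module load, and
		 * the lookup retried once the lock is re-taken.
		 */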
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name, q->mq_ops != NULL);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);
	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
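
/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * stores "deadline" in chosen_elevator; legacy (non-mq) queues will then
 * try that scheduler when they are initialized.
 */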

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	/*
	 * The boot parameter is deprecated and was never supported for
	 * MQ. Only look for non-mq schedulers from here.
	 */
	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator, false);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				      struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);
	eq->uses_mq = e->uses_mq;

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(q, name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by the boot parameter for
	 * non-mq devices, or the one selected by config option. Don't
	 * try to load modules, as we could be running off async and
	 * request_module() isn't allowed from async.
	 */
	if (!e && !q->mq_ops && *chosen_elevator) {
		e = elevator_get(q, chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		/*
		 * For blk-mq devices, we default to using mq-deadline,
		 * if available, for single queue devices. If deadline
		 * isn't available OR we have multiple queues, default
		 * to "none".
		 */
		if (q->mq_ops) {
			if (q->nr_hw_queues == 1)
				e = elevator_get(q, "mq-deadline", false);
			if (!e)
				return 0;
		} else
			e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);

		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get(q, "noop", false);
		}
	}

	if (e->uses_mq)
		err = blk_mq_init_sched(q, e);
	else
		err = e->ops.sq.elevator_init_fn(q, e);
	if (err)
		elevator_put(e);
	return err;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->uses_mq && e->type->ops.mq.exit_sched)
		blk_mq_exit_sched(q, e);
	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
		e->type->ops.sq.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

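/*
 * A back merge grows a request and moves its end sector, which changes
 * rq_hash_key(rq); the request must therefore be re-hashed.
 */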
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
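
/*
 * Illustrative sketch (hypothetical scheduler-private data, not part of
 * this file): an elevator typically keeps its pending requests in such
 * a tree, sorted by start sector:
 *
 *	struct rb_root sort_list = RB_ROOT;
 *
 *	elv_rb_add(&sort_list, rq);			// queue rq
 *	__rq = elv_rb_find(&sort_list, blk_rq_pos(rq));	// exact lookup
 *	elv_rb_del(&sort_list, rq);			// done with rq
 */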

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry. rq is sorted into the dispatch queue. To be used by specific
 * elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
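
/*
 * Illustrative sketch (hypothetical elevator, simplified): a legacy
 * single-queue scheduler's dispatch hook typically pops its next chosen
 * request and feeds it to the dispatch queue like this:
 *
 *	static int foo_dispatch(struct request_queue *q, int force)
 *	{
 *		struct request *rq = foo_next_request(q); // scheduler-private
 *
 *		if (!rq)
 *			return 0;
 *		elv_dispatch_sort(q, rq);
 *		return 1;
 *	}
 */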

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry. rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->uses_mq && e->type->ops.mq.request_merge)
		return e->type->ops.mq.request_merge(q, req, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
		return e->type->ops.sq.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
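
/*
 * Illustrative caller pattern (a simplified sketch of the legacy
 * submission path; locking, accounting and request allocation omitted):
 *
 *	struct request *rq;
 *
 *	switch (elv_merge(q, &rq, bio)) {
 *	case ELEVATOR_BACK_MERGE:
 *		if (bio_attempt_back_merge(q, rq, bio))
 *			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
 *		break;
 *	case ELEVATOR_FRONT_MERGE:
 *		if (bio_attempt_front_merge(q, rq, bio))
 *			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
 *		break;
 *	default:
 *		break;	// no merge: allocate a new request for the bio
 *	}
 */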

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
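/*
 * Worked example (illustrative): with requests A = sectors [0, 8) and
 * B = [8, 16) already hashed, inserting rq = [16, 24) first appends rq
 * to B (giving [8, 24)), then retries and appends that result to A,
 * leaving a single request covering [0, 24).
 */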
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 505 | bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq) |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 506 | { |
| 507 | struct request *__rq; |
Shaohua Li | bee0393 | 2012-11-09 08:44:27 +0100 | [diff] [blame] | 508 | bool ret; |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 509 | |
| 510 | if (blk_queue_nomerges(q)) |
| 511 | return false; |
| 512 | |
| 513 | /* |
| 514 | * First try one-hit cache. |
| 515 | */ |
| 516 | if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) |
| 517 | return true; |
| 518 | |
| 519 | if (blk_queue_noxmerges(q)) |
| 520 | return false; |
| 521 | |
Shaohua Li | bee0393 | 2012-11-09 08:44:27 +0100 | [diff] [blame] | 522 | ret = false; |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 523 | /* |
| 524 | * See if our hash lookup can find a potential backmerge. |
| 525 | */ |
Shaohua Li | bee0393 | 2012-11-09 08:44:27 +0100 | [diff] [blame] | 526 | while (1) { |
| 527 | __rq = elv_rqhash_find(q, blk_rq_pos(rq)); |
| 528 | if (!__rq || !blk_attempt_req_merge(q, __rq, rq)) |
| 529 | break; |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 530 | |
Shaohua Li | bee0393 | 2012-11-09 08:44:27 +0100 | [diff] [blame] | 531 | /* The merged request could be merged with others, try again */ |
| 532 | ret = true; |
| 533 | rq = __rq; |
| 534 | } |
| 535 | |
| 536 | return ret; |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 537 | } |
| 538 | |
Christoph Hellwig | 34fe7c0 | 2017-02-08 14:46:48 +0100 | [diff] [blame] | 539 | void elv_merged_request(struct request_queue *q, struct request *rq, |
| 540 | enum elv_merge type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 541 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 542 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 543 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 544 | if (e->uses_mq && e->type->ops.mq.request_merged) |
| 545 | e->type->ops.mq.request_merged(q, rq, type); |
| 546 | else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn) |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 547 | e->type->ops.sq.elevator_merged_fn(q, rq, type); |
Tejun Heo | 06b8624 | 2005-10-20 16:46:23 +0200 | [diff] [blame] | 548 | |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 549 | if (type == ELEVATOR_BACK_MERGE) |
| 550 | elv_rqhash_reposition(q, rq); |
Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 551 | |
Tejun Heo | 06b8624 | 2005-10-20 16:46:23 +0200 | [diff] [blame] | 552 | q->last_merge = rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 553 | } |
| 554 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 555 | void elv_merge_requests(struct request_queue *q, struct request *rq, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 556 | struct request *next) |
| 557 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 558 | struct elevator_queue *e = q->elevator; |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 559 | bool next_sorted = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 560 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 561 | if (e->uses_mq && e->type->ops.mq.requests_merged) |
| 562 | e->type->ops.mq.requests_merged(q, rq, next); |
| 563 | else if (e->type->ops.sq.elevator_merge_req_fn) { |
Bart Van Assche | a1ae0f7 | 2017-02-01 12:22:23 -0700 | [diff] [blame] | 564 | next_sorted = (__force bool)(next->rq_flags & RQF_SORTED); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 565 | if (next_sorted) |
| 566 | e->type->ops.sq.elevator_merge_req_fn(q, rq, next); |
| 567 | } |
Tejun Heo | 06b8624 | 2005-10-20 16:46:23 +0200 | [diff] [blame] | 568 | |
Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 569 | elv_rqhash_reposition(q, rq); |
Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 570 | |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 571 | if (next_sorted) { |
| 572 | elv_rqhash_del(q, next); |
| 573 | q->nr_sorted--; |
| 574 | } |
| 575 | |
Tejun Heo | 06b8624 | 2005-10-20 16:46:23 +0200 | [diff] [blame] | 576 | q->last_merge = rq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 577 | } |
| 578 | |
Divyesh Shah | 812d402 | 2010-04-08 21:14:23 -0700 | [diff] [blame] | 579 | void elv_bio_merged(struct request_queue *q, struct request *rq, |
| 580 | struct bio *bio) |
| 581 | { |
| 582 | struct elevator_queue *e = q->elevator; |
| 583 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 584 | if (WARN_ON_ONCE(e->uses_mq)) |
| 585 | return; |
| 586 | |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 587 | if (e->type->ops.sq.elevator_bio_merged_fn) |
| 588 | e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio); |
Divyesh Shah | 812d402 | 2010-04-08 21:14:23 -0700 | [diff] [blame] | 589 | } |
| 590 | |
Rafael J. Wysocki | 47fafbc | 2014-12-04 01:00:23 +0100 | [diff] [blame] | 591 | #ifdef CONFIG_PM |
Lin Ming | c815881 | 2013-03-23 11:42:27 +0800 | [diff] [blame] | 592 | static void blk_pm_requeue_request(struct request *rq) |
| 593 | { |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 594 | if (rq->q->dev && !(rq->rq_flags & RQF_PM)) |
Lin Ming | c815881 | 2013-03-23 11:42:27 +0800 | [diff] [blame] | 595 | rq->q->nr_pending--; |
| 596 | } |
| 597 | |
| 598 | static void blk_pm_add_request(struct request_queue *q, struct request *rq) |
| 599 | { |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 600 | if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 && |
Lin Ming | c815881 | 2013-03-23 11:42:27 +0800 | [diff] [blame] | 601 | (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING)) |
| 602 | pm_request_resume(q->dev); |
| 603 | } |
| 604 | #else |
| 605 | static inline void blk_pm_requeue_request(struct request *rq) {} |
| 606 | static inline void blk_pm_add_request(struct request_queue *q, |
| 607 | struct request *rq) |
| 608 | { |
| 609 | } |
| 610 | #endif |
| 611 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 612 | void elv_requeue_request(struct request_queue *q, struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 613 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 614 | /* |
| 615 | * it already went through dequeue, we need to decrement the |
| 616 | * in_flight count again |
| 617 | */ |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 618 | if (blk_account_rq(rq)) { |
Jens Axboe | 0a7ae2f | 2009-05-20 08:54:31 +0200 | [diff] [blame] | 619 | q->in_flight[rq_is_sync(rq)]--; |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 620 | if (rq->rq_flags & RQF_SORTED) |
Jens Axboe | cad9751 | 2007-01-14 22:26:09 +1100 | [diff] [blame] | 621 | elv_deactivate_rq(q, rq); |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 622 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 623 | |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 624 | rq->rq_flags &= ~RQF_STARTED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 625 | |
Lin Ming | c815881 | 2013-03-23 11:42:27 +0800 | [diff] [blame] | 626 | blk_pm_requeue_request(rq); |
| 627 | |
Jens Axboe | b710a48 | 2011-03-30 09:52:30 +0200 | [diff] [blame] | 628 | __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 629 | } |
| 630 | |
Jerome Marchand | 26308ea | 2009-03-27 10:31:51 +0100 | [diff] [blame] | 631 | void elv_drain_elevator(struct request_queue *q) |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 632 | { |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 633 | struct elevator_queue *e = q->elevator; |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 634 | static int printed; |
Tejun Heo | e3c78ca | 2011-10-19 14:32:38 +0200 | [diff] [blame] | 635 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 636 | if (WARN_ON_ONCE(e->uses_mq)) |
| 637 | return; |
| 638 | |
Tejun Heo | e3c78ca | 2011-10-19 14:32:38 +0200 | [diff] [blame] | 639 | lockdep_assert_held(q->queue_lock); |
| 640 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 641 | while (e->type->ops.sq.elevator_dispatch_fn(q, 1)) |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 642 | ; |
Tejun Heo | e3c78ca | 2011-10-19 14:32:38 +0200 | [diff] [blame] | 643 | if (q->nr_sorted && printed++ < 10) { |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 644 | printk(KERN_ERR "%s: forced dispatching is broken " |
| 645 | "(nr_sorted=%u), please report this\n", |
Tejun Heo | 22f746e | 2011-12-14 00:33:41 +0100 | [diff] [blame] | 646 | q->elevator->type->elevator_name, q->nr_sorted); |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 647 | } |
| 648 | } |
| 649 | |
Jens Axboe | b710a48 | 2011-03-30 09:52:30 +0200 | [diff] [blame] | 650 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 651 | { |
Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 652 | trace_block_rq_insert(q, rq); |
Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 653 | |
Lin Ming | c815881 | 2013-03-23 11:42:27 +0800 | [diff] [blame] | 654 | blk_pm_add_request(q, rq); |
| 655 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 656 | rq->q = q; |
| 657 | |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 658 | if (rq->rq_flags & RQF_SOFTBARRIER) { |
Jens Axboe | b710a48 | 2011-03-30 09:52:30 +0200 | [diff] [blame] | 659 | /* barriers are scheduling boundary, update end_sector */ |
Christoph Hellwig | 57292b5 | 2017-01-31 16:57:29 +0100 | [diff] [blame] | 660 | if (!blk_rq_is_passthrough(rq)) { |
Jens Axboe | b710a48 | 2011-03-30 09:52:30 +0200 | [diff] [blame] | 661 | q->end_sector = rq_end_sector(rq); |
| 662 | q->boundary_rq = rq; |
| 663 | } |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 664 | } else if (!(rq->rq_flags & RQF_ELVPRIV) && |
Jens Axboe | 3aa7287 | 2011-04-21 19:28:35 +0200 | [diff] [blame] | 665 | (where == ELEVATOR_INSERT_SORT || |
| 666 | where == ELEVATOR_INSERT_SORT_MERGE)) |
Jens Axboe | b710a48 | 2011-03-30 09:52:30 +0200 | [diff] [blame] | 667 | where = ELEVATOR_INSERT_BACK; |
| 668 | |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 669 | switch (where) { |
Tejun Heo | 28e7d18 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 670 | case ELEVATOR_INSERT_REQUEUE: |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 671 | case ELEVATOR_INSERT_FRONT: |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 672 | rq->rq_flags |= RQF_SOFTBARRIER; |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 673 | list_add(&rq->queuelist, &q->queue_head); |
| 674 | break; |
| 675 | |
| 676 | case ELEVATOR_INSERT_BACK: |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 677 | rq->rq_flags |= RQF_SOFTBARRIER; |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 678 | elv_drain_elevator(q); |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 679 | list_add_tail(&rq->queuelist, &q->queue_head); |
| 680 | /* |
| 681 | * We kick the queue here for the following reasons. |
| 682 | * - The elevator might have returned NULL previously |
| 683 | * to delay requests and returned them now. As the |
| 684 | * queue wasn't empty before this request, ll_rw_blk |
| 685 | * won't run the queue on return, resulting in hang. |
| 686 | * - Usually, back inserted requests won't be merged |
| 687 | * with anything. There's no point in delaying queue |
| 688 | * processing. |
| 689 | */ |
Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 690 | __blk_run_queue(q); |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 691 | break; |
| 692 | |
Jens Axboe | 5e84ea3 | 2011-03-21 10:14:27 +0100 | [diff] [blame] | 693 | case ELEVATOR_INSERT_SORT_MERGE: |
| 694 | /* |
| 695 | * If we succeed in merging this request with one in the |
| 696 | * queue already, we are done - rq has now been freed, |
| 697 | * so no need to do anything further. |
| 698 | */ |
| 699 | if (elv_attempt_insert_merge(q, rq)) |
| 700 | break; |
Bart Van Assche | e29387e | 2017-06-21 09:40:11 -0700 | [diff] [blame] | 701 | /* fall through */ |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 702 | case ELEVATOR_INSERT_SORT: |
Christoph Hellwig | 57292b5 | 2017-01-31 16:57:29 +0100 | [diff] [blame] | 703 | BUG_ON(blk_rq_is_passthrough(rq)); |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 704 | rq->rq_flags |= RQF_SORTED; |
Tejun Heo | 15853af | 2005-11-10 08:52:05 +0100 | [diff] [blame] | 705 | q->nr_sorted++; |
Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 706 | if (rq_mergeable(rq)) { |
| 707 | elv_rqhash_add(q, rq); |
| 708 | if (!q->last_merge) |
| 709 | q->last_merge = rq; |
| 710 | } |
| 711 | |
Tejun Heo | ca23509 | 2005-11-01 17:23:49 +0900 | [diff] [blame] | 712 | /* |
| 713 | * Some ioscheds (cfq) run q->request_fn directly, so |
| 714 | * rq cannot be accessed after calling |
| 715 | * elevator_add_req_fn. |
| 716 | */ |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 717 | q->elevator->type->ops.sq.elevator_add_req_fn(q, rq); |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 718 | break; |
| 719 | |
Tejun Heo | ae1b153 | 2011-01-25 12:43:54 +0100 | [diff] [blame] | 720 | case ELEVATOR_INSERT_FLUSH: |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 721 | rq->rq_flags |= RQF_SOFTBARRIER; |
Tejun Heo | ae1b153 | 2011-01-25 12:43:54 +0100 | [diff] [blame] | 722 | blk_insert_flush(rq); |
| 723 | break; |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 724 | default: |
| 725 | printk(KERN_ERR "%s: bad insertion point %d\n", |
Harvey Harrison | 24c03d4 | 2008-05-01 04:35:17 -0700 | [diff] [blame] | 726 | __func__, where); |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 727 | BUG(); |
| 728 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 729 | } |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 730 | EXPORT_SYMBOL(__elv_add_request); |
| 731 | |
Jens Axboe | 7eaceac | 2011-03-10 08:52:07 +0100 | [diff] [blame] | 732 | void elv_add_request(struct request_queue *q, struct request *rq, int where) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 733 | { |
| 734 | unsigned long flags; |
| 735 | |
| 736 | spin_lock_irqsave(q->queue_lock, flags); |
Jens Axboe | 7eaceac | 2011-03-10 08:52:07 +0100 | [diff] [blame] | 737 | __elv_add_request(q, rq, where); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 738 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 739 | } |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 740 | EXPORT_SYMBOL(elv_add_request); |
| 741 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 742 | struct request *elv_latter_request(struct request_queue *q, struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 743 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 744 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 745 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 746 | if (e->uses_mq && e->type->ops.mq.next_request) |
| 747 | return e->type->ops.mq.next_request(q, rq); |
| 748 | else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn) |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 749 | return e->type->ops.sq.elevator_latter_req_fn(q, rq); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 750 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 751 | return NULL; |
| 752 | } |
| 753 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 754 | struct request *elv_former_request(struct request_queue *q, struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 755 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 756 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 757 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 758 | if (e->uses_mq && e->type->ops.mq.former_request) |
| 759 | return e->type->ops.mq.former_request(q, rq); |
| 760 | if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn) |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 761 | return e->type->ops.sq.elevator_former_req_fn(q, rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 762 | return NULL; |
| 763 | } |
| 764 | |
Tejun Heo | 852c788 | 2012-03-05 13:15:27 -0800 | [diff] [blame] | 765 | int elv_set_request(struct request_queue *q, struct request *rq, |
| 766 | struct bio *bio, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 767 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 768 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 769 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 770 | if (WARN_ON_ONCE(e->uses_mq)) |
| 771 | return 0; |
| 772 | |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 773 | if (e->type->ops.sq.elevator_set_req_fn) |
| 774 | return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 775 | return 0; |
| 776 | } |
| 777 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 778 | void elv_put_request(struct request_queue *q, struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 779 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 780 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 781 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 782 | if (WARN_ON_ONCE(e->uses_mq)) |
| 783 | return; |
| 784 | |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 785 | if (e->type->ops.sq.elevator_put_req_fn) |
| 786 | e->type->ops.sq.elevator_put_req_fn(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 787 | } |
| 788 | |
Christoph Hellwig | ef295ec | 2016-10-28 08:48:16 -0600 | [diff] [blame] | 789 | int elv_may_queue(struct request_queue *q, unsigned int op) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 790 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 791 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 792 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 793 | if (WARN_ON_ONCE(e->uses_mq)) |
| 794 | return 0; |
| 795 | |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 796 | if (e->type->ops.sq.elevator_may_queue_fn) |
| 797 | return e->type->ops.sq.elevator_may_queue_fn(q, op); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 798 | |
| 799 | return ELV_MQUEUE_MAY; |
| 800 | } |
| 801 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 802 | void elv_completed_request(struct request_queue *q, struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 803 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 804 | struct elevator_queue *e = q->elevator; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 805 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 806 | if (WARN_ON_ONCE(e->uses_mq)) |
| 807 | return; |
| 808 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 809 | /* |
| 810 | * request is released from the driver, io must be done |
| 811 | */ |
Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 812 | if (blk_account_rq(rq)) { |
Jens Axboe | 0a7ae2f | 2009-05-20 08:54:31 +0200 | [diff] [blame] | 813 | q->in_flight[rq_is_sync(rq)]--; |
Christoph Hellwig | e806402 | 2016-10-20 15:12:13 +0200 | [diff] [blame] | 814 | if ((rq->rq_flags & RQF_SORTED) && |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 815 | e->type->ops.sq.elevator_completed_req_fn) |
| 816 | e->type->ops.sq.elevator_completed_req_fn(q, rq); |
Tejun Heo | 1bc691d | 2006-01-12 15:39:26 +0100 | [diff] [blame] | 817 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 818 | } |
| 819 | |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 820 | #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) |
| 821 | |
| 822 | static ssize_t |
| 823 | elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
| 824 | { |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 825 | struct elv_fs_entry *entry = to_elv(attr); |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 826 | struct elevator_queue *e; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 827 | ssize_t error; |
| 828 | |
| 829 | if (!entry->show) |
| 830 | return -EIO; |
| 831 | |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 832 | e = container_of(kobj, struct elevator_queue, kobj); |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 833 | mutex_lock(&e->sysfs_lock); |
Tejun Heo | 22f746e | 2011-12-14 00:33:41 +0100 | [diff] [blame] | 834 | error = e->type ? entry->show(e, page) : -ENOENT; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 835 | mutex_unlock(&e->sysfs_lock); |
| 836 | return error; |
| 837 | } |
| 838 | |
| 839 | static ssize_t |
| 840 | elv_attr_store(struct kobject *kobj, struct attribute *attr, |
| 841 | const char *page, size_t length) |
| 842 | { |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 843 | struct elv_fs_entry *entry = to_elv(attr); |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 844 | struct elevator_queue *e; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 845 | ssize_t error; |
| 846 | |
| 847 | if (!entry->store) |
| 848 | return -EIO; |
| 849 | |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 850 | e = container_of(kobj, struct elevator_queue, kobj); |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 851 | mutex_lock(&e->sysfs_lock); |
Tejun Heo | 22f746e | 2011-12-14 00:33:41 +0100 | [diff] [blame] | 852 | error = e->type ? entry->store(e, page, length) : -ENOENT; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 853 | mutex_unlock(&e->sysfs_lock); |
| 854 | return error; |
| 855 | } |
| 856 | |
Emese Revfy | 52cf25d | 2010-01-19 02:58:23 +0100 | [diff] [blame] | 857 | static const struct sysfs_ops elv_sysfs_ops = { |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 858 | .show = elv_attr_show, |
| 859 | .store = elv_attr_store, |
| 860 | }; |
| 861 | |
| 862 | static struct kobj_type elv_ktype = { |
| 863 | .sysfs_ops = &elv_sysfs_ops, |
| 864 | .release = elevator_release, |
| 865 | }; |
| 866 | |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 867 | int elv_register_queue(struct request_queue *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | { |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 869 | struct elevator_queue *e = q->elevator; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 870 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 871 | |
Greg Kroah-Hartman | b2d6db5 | 2007-12-17 23:05:35 -0700 | [diff] [blame] | 872 | error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 873 | if (!error) { |
Tejun Heo | 22f746e | 2011-12-14 00:33:41 +0100 | [diff] [blame] | 874 | struct elv_fs_entry *attr = e->type->elevator_attrs; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 875 | if (attr) { |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 876 | while (attr->attr.name) { |
| 877 | if (sysfs_create_file(&e->kobj, &attr->attr)) |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 878 | break; |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 879 | attr++; |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 880 | } |
| 881 | } |
| 882 | kobject_uevent(&e->kobj, KOBJ_ADD); |
Jens Axboe | 430c62f | 2010-10-07 09:35:16 +0200 | [diff] [blame] | 883 | e->registered = 1; |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 884 | if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn) |
Jens Axboe | c51ca6c | 2016-12-10 15:13:59 -0700 | [diff] [blame] | 885 | e->type->ops.sq.elevator_registered_fn(q); |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 886 | } |
| 887 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 888 | } |
Tejun Heo | f8fc877 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 889 | EXPORT_SYMBOL(elv_register_queue); |
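/*
 * A sketch of the elevator_attrs array that elv_register_queue() walks above;
 * the attribute name and helpers are illustrative, not from an in-tree
 * scheduler. The __ATTR_NULL sentinel is what terminates the attr->attr.name
 * loop.
 */
static ssize_t example_fifo_expire_show(struct elevator_queue *e, char *page)
{
	/* a real scheduler would format a value from e->elevator_data */
	return sprintf(page, "%d\n", 0);
}

static ssize_t example_fifo_expire_store(struct elevator_queue *e,
					 const char *page, size_t count)
{
	/* parse page and update e->elevator_data; count means "consumed" */
	return count;
}

static struct elv_fs_entry example_attrs[] = {
	__ATTR(fifo_expire, S_IRUGO | S_IWUSR, example_fifo_expire_show,
	       example_fifo_expire_store),
	__ATTR_NULL
};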
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 890 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | void elv_unregister_queue(struct request_queue *q) |
| 892 | { |
Tejun Heo | f8fc877 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 893 | if (q) { |
| 894 | struct elevator_queue *e = q->elevator; |
| 895 | |
| 896 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
| 897 | kobject_del(&e->kobj); |
| 898 | e->registered = 0; |
Jan Kara | 8330cdb | 2017-04-19 11:33:27 +0200 | [diff] [blame] | 899 | /* Re-enable throttling in case elevator disabled it */ |
| 900 | wbt_enable_default(q); |
Tejun Heo | f8fc877 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 901 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 902 | } |
Mike Snitzer | 01effb0 | 2010-05-11 08:57:42 +0200 | [diff] [blame] | 903 | EXPORT_SYMBOL(elv_unregister_queue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 904 | |
Jens Axboe | e567bf7 | 2014-06-22 16:32:48 -0600 | [diff] [blame] | 905 | int elv_register(struct elevator_type *e) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 | { |
Thibaut VARENE | 1ffb96c | 2007-03-15 12:59:19 +0100 | [diff] [blame] | 907 | char *def = ""; |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 908 | |
Tejun Heo | 3d3c237 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 909 | /* create icq_cache if requested */ |
| 910 | if (e->icq_size) { |
| 911 | if (WARN_ON(e->icq_size < sizeof(struct io_cq)) || |
| 912 | WARN_ON(e->icq_align < __alignof__(struct io_cq))) |
| 913 | return -EINVAL; |
| 914 | |
| 915 | snprintf(e->icq_cache_name, sizeof(e->icq_cache_name), |
| 916 | "%s_io_cq", e->elevator_name); |
| 917 | e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size, |
| 918 | e->icq_align, 0, NULL); |
| 919 | if (!e->icq_cache) |
| 920 | return -ENOMEM; |
| 921 | } |
| 922 | |
| 923 | /* register, don't allow duplicate names */ |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 924 | spin_lock(&elv_list_lock); |
Jens Axboe | 2527d99 | 2017-10-25 12:33:42 -0600 | [diff] [blame] | 925 | if (elevator_find(e->elevator_name, e->uses_mq)) { |
Tejun Heo | 3d3c237 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 926 | spin_unlock(&elv_list_lock); |
| 927 | 		/* kmem_cache_destroy() ignores NULL, no separate check needed */
| 928 | 		kmem_cache_destroy(e->icq_cache);
| 929 | return -EBUSY; |
| 930 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 | list_add_tail(&e->list, &elv_list); |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 932 | spin_unlock(&elv_list_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | |
Tejun Heo | 3d3c237 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 934 | /* print pretty message */ |
Jens Axboe | 8ac0d9a | 2017-10-25 12:35:02 -0600 | [diff] [blame] | 935 | if (elevator_match(e, chosen_elevator) || |
Nate Diller | 5f00397 | 2006-01-24 10:07:58 +0100 | [diff] [blame] | 936 | (!*chosen_elevator && |
Jens Axboe | 8ac0d9a | 2017-10-25 12:35:02 -0600 | [diff] [blame] | 937 | elevator_match(e, CONFIG_DEFAULT_IOSCHED))) |
Thibaut VARENE | 1ffb96c | 2007-03-15 12:59:19 +0100 | [diff] [blame] | 938 | def = " (default)"; |
| 939 | |
Jens Axboe | 4eb166d | 2008-02-01 00:37:27 +0100 | [diff] [blame] | 940 | printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, |
| 941 | def); |
Tejun Heo | 3d3c237 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 942 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | } |
| 944 | EXPORT_SYMBOL_GPL(elv_register); |
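/*
 * A sketch of how a scheduler module feeds elv_register(); the type and its
 * fields here are hypothetical, and example_attrs refers to the sysfs sketch
 * above. A non-zero icq_size would trigger the io_cq cache creation shown
 * earlier, and a duplicate elevator_name yields -EBUSY.
 */
static struct elevator_type example_iosched = {
	.elevator_name	= "example",
	.elevator_attrs	= example_attrs,
	.elevator_owner	= THIS_MODULE,
};

static int __init example_iosched_init(void)
{
	return elv_register(&example_iosched);
}
module_init(example_iosched_init);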
| 945 | |
| 946 | void elv_unregister(struct elevator_type *e) |
| 947 | { |
Tejun Heo | 3d3c237 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 948 | /* unregister */ |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 949 | spin_lock(&elv_list_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 | list_del_init(&e->list); |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 951 | spin_unlock(&elv_list_lock); |
Tejun Heo | 3d3c237 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 952 | |
| 953 | /* |
| 954 | 	 * Destroy icq_cache if it exists. icqs are RCU-managed, so make
| 955 | 	 * sure all RCU operations are complete before proceeding.
| 956 | */ |
| 957 | if (e->icq_cache) { |
| 958 | rcu_barrier(); |
| 959 | kmem_cache_destroy(e->icq_cache); |
| 960 | e->icq_cache = NULL; |
| 961 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | } |
| 963 | EXPORT_SYMBOL_GPL(elv_unregister); |
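/*
 * The matching teardown for the sketch above, hypothetical names again.
 * elv_unregister() itself issues the rcu_barrier() when an icq cache exists,
 * so the module exit path needs no extra RCU synchronization of its own.
 */
static void __exit example_iosched_exit(void)
{
	elv_unregister(&example_iosched);
}
module_exit(example_iosched_exit);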
| 964 | |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 965 | static int elevator_switch_mq(struct request_queue *q, |
| 966 | struct elevator_type *new_e) |
| 967 | { |
| 968 | int ret; |
| 969 | |
| 970 | blk_mq_freeze_queue(q); |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 971 | |
| 972 | if (q->elevator) { |
| 973 | if (q->elevator->registered) |
| 974 | elv_unregister_queue(q); |
| 975 | ioc_clear_queue(q); |
| 976 | elevator_exit(q, q->elevator); |
| 977 | } |
| 978 | |
| 979 | ret = blk_mq_init_sched(q, new_e); |
| 980 | if (ret) |
| 981 | goto out; |
| 982 | |
| 983 | 	if (new_e) {
| 984 | 		ret = elv_register_queue(q);
| 985 | 		if (ret) {
| 986 | 			elevator_exit(q, q->elevator);
| 987 | 			goto out;
| 988 | 		}
| 989 | 		blk_add_trace_msg(q, "elv switch: %s",
| 990 | 				  new_e->elevator_name);
| 991 | 	} else {
| 992 | 		blk_add_trace_msg(q, "elv switch: none");
| 993 | 	}
| 994 | 
| 995 | |
| 996 | out: |
| 997 | blk_mq_unfreeze_queue(q); |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 998 | return ret; |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 999 | } |
| 1000 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 | /* |
| 1002 | * Switch to the new_e io scheduler. Be careful not to introduce deadlocks:
| 1003 | * we don't free the old io scheduler before we have allocated what we
| 1004 | * need for the new one. This way we have a chance of going back to the old
Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 1005 | * one if the new one fails init for some reason.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | */ |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1007 | static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1008 | { |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 1009 | struct elevator_queue *old = q->elevator; |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1010 | bool old_registered = false; |
Tejun Heo | e8989fa | 2012-03-05 13:15:20 -0800 | [diff] [blame] | 1011 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 | |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1013 | if (q->mq_ops) |
| 1014 | return elevator_switch_mq(q, new_e); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1015 | |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 1016 | /* |
| 1017 | 	 * Turn on BYPASS and drain all requests with elevator private data.
| 1018 | 	 * The block layer doesn't call into a quiesced elevator: all requests
| 1019 | 	 * are put directly on the dispatch list, without elevator data,
| 1020 | 	 * using INSERT_BACK. All requests have SOFTBARRIER set, so no
| 1021 | 	 * merging happens either.
| 1022 | */ |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1023 | if (old) { |
| 1024 | old_registered = old->registered; |
Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 1025 | |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1026 | blk_queue_bypass_start(q); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1027 | |
| 1028 | /* unregister and clear all auxiliary data of the old elevator */ |
| 1029 | if (old_registered) |
| 1030 | elv_unregister_queue(q); |
| 1031 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1032 | ioc_clear_queue(q); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1033 | } |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 1034 | |
| 1035 | /* allocate, init and register new elevator */ |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1036 | err = new_e->ops.sq.elevator_init_fn(q, new_e); |
Omar Sandoval | 6917ff0 | 2017-04-05 12:01:30 -0700 | [diff] [blame] | 1037 | if (err) |
| 1038 | goto fail_init; |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 1039 | |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1040 | err = elv_register_queue(q); |
| 1041 | if (err) |
| 1042 | goto fail_register; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 1044 | /* done, kill the old one and finish */ |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1045 | if (old) { |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1046 | elevator_exit(q, old); |
| 1047 | blk_queue_bypass_end(q); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1048 | } |
Nick Piggin | 75ad23b | 2008-04-29 14:48:33 +0200 | [diff] [blame] | 1049 | |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1050 | blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); |
Alan D. Brunelle | 4722dc5 | 2008-05-27 14:55:00 +0200 | [diff] [blame] | 1051 | |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1052 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 | |
| 1054 | fail_register: |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1055 | elevator_exit(q, q->elevator); |
Tejun Heo | 5a5bafd | 2012-03-05 13:14:56 -0800 | [diff] [blame] | 1056 | fail_init: |
| 1057 | /* switch failed, restore and re-register old elevator */ |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1058 | if (old) { |
| 1059 | q->elevator = old; |
| 1060 | elv_register_queue(q); |
Omar Sandoval | 54d5329 | 2017-04-07 08:52:27 -0600 | [diff] [blame] | 1061 | blk_queue_bypass_end(q); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1062 | } |
Nick Piggin | 75ad23b | 2008-04-29 14:48:33 +0200 | [diff] [blame] | 1063 | |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1064 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | } |
| 1066 | |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1067 | /* |
| 1068 | * Switch this queue to the given IO scheduler. |
| 1069 | */ |
Tomoki Sekiyama | 7c8a367 | 2013-10-15 16:42:19 -0600 | [diff] [blame] | 1070 | static int __elevator_change(struct request_queue *q, const char *name) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | { |
| 1072 | char elevator_name[ELV_NAME_MAX]; |
| 1073 | struct elevator_type *e; |
| 1074 | |
David Jeffery | e9a823f | 2017-08-28 10:52:44 -0600 | [diff] [blame] | 1075 | /* Make sure queue is not in the middle of being removed */ |
| 1076 | if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) |
| 1077 | return -ENOENT; |
| 1078 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1079 | /* |
| 1080 | 	 * Special case for mq: writing "none" turns off scheduling entirely.
| 1081 | */ |
| 1082 | if (q->mq_ops && !strncmp(name, "none", 4)) |
| 1083 | return elevator_switch(q, NULL); |
Martin K. Petersen | cd43e26 | 2009-05-22 17:17:52 -0400 | [diff] [blame] | 1084 | |
Li Zefan | ee2e992 | 2008-10-14 08:49:56 +0200 | [diff] [blame] | 1085 | strlcpy(elevator_name, name, sizeof(elevator_name)); |
Jens Axboe | 2527d99 | 2017-10-25 12:33:42 -0600 | [diff] [blame] | 1086 | e = elevator_get(q, strstrip(elevator_name), true); |
Jens Axboe | 340ff32 | 2017-05-10 07:40:04 -0600 | [diff] [blame] | 1087 | if (!e) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 | |
Jens Axboe | 8ac0d9a | 2017-10-25 12:35:02 -0600 | [diff] [blame] | 1090 | if (q->elevator && elevator_match(q->elevator->type, elevator_name)) { |
Nate Diller | 2ca7d93 | 2005-10-30 15:02:24 -0800 | [diff] [blame] | 1091 | elevator_put(e); |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1092 | return 0; |
Nate Diller | 2ca7d93 | 2005-10-30 15:02:24 -0800 | [diff] [blame] | 1093 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1095 | return elevator_switch(q, e); |
| 1096 | } |
Tomoki Sekiyama | 7c8a367 | 2013-10-15 16:42:19 -0600 | [diff] [blame] | 1097 | |
Ming Lei | 3a5088c | 2017-04-15 20:38:22 +0800 | [diff] [blame] | 1098 | static inline bool elv_support_iosched(struct request_queue *q) |
| 1099 | { |
| 1100 | 	if (q->mq_ops && q->tag_set &&
| 1101 | 	    (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
| 1102 | return false; |
| 1103 | return true; |
| 1104 | } |
| 1105 | |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1106 | ssize_t elv_iosched_store(struct request_queue *q, const char *name, |
| 1107 | size_t count) |
| 1108 | { |
| 1109 | int ret; |
| 1110 | |
Ming Lei | 3a5088c | 2017-04-15 20:38:22 +0800 | [diff] [blame] | 1111 | if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q)) |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1112 | return count; |
| 1113 | |
Tomoki Sekiyama | 7c8a367 | 2013-10-15 16:42:19 -0600 | [diff] [blame] | 1114 | ret = __elevator_change(q, name); |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1115 | if (!ret) |
| 1116 | return count; |
| 1117 | |
Jens Axboe | 5dd531a | 2010-08-23 13:52:19 +0200 | [diff] [blame] | 1118 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | } |
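/*
 * elv_iosched_store() is the kernel end of the sysfs scheduler knob. Below
 * is a minimal userspace sketch of driving it; the device path is an
 * assumption, and the snippet is compiled separately, never as part of this
 * file.
 */
#if 0	/* userspace illustration only */
#include <fcntl.h>
#include <unistd.h>

int set_scheduler(void)
{
	int fd = open("/sys/block/sda/queue/scheduler", O_WRONLY);

	if (fd < 0)
		return -1;
	/* __elevator_change() runs strstrip(), so a trailing \n is fine */
	if (write(fd, "mq-deadline\n", 12) != 12) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif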
| 1120 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1121 | ssize_t elv_iosched_show(struct request_queue *q, char *name) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | { |
Jens Axboe | b374d18 | 2008-10-31 10:05:07 +0100 | [diff] [blame] | 1123 | struct elevator_queue *e = q->elevator; |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1124 | struct elevator_type *elv = NULL; |
Matthias Kaehlcke | 70cee26 | 2007-07-10 12:26:24 +0200 | [diff] [blame] | 1125 | struct elevator_type *__e; |
Jens Axboe | 8ac0d9a | 2017-10-25 12:35:02 -0600 | [diff] [blame] | 1126 | bool uses_mq = q->mq_ops != NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | int len = 0; |
| 1128 | |
Christoph Hellwig | 5fdee21 | 2017-10-05 21:22:52 +0200 | [diff] [blame] | 1129 | if (!queue_is_rq_based(q)) |
Martin K. Petersen | cd43e26 | 2009-05-22 17:17:52 -0400 | [diff] [blame] | 1130 | return sprintf(name, "none\n"); |
| 1131 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1132 | if (!q->elevator) |
| 1133 | len += sprintf(name+len, "[none] "); |
| 1134 | else |
| 1135 | elv = e->type; |
Martin K. Petersen | cd43e26 | 2009-05-22 17:17:52 -0400 | [diff] [blame] | 1136 | |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 1137 | spin_lock(&elv_list_lock); |
Matthias Kaehlcke | 70cee26 | 2007-07-10 12:26:24 +0200 | [diff] [blame] | 1138 | list_for_each_entry(__e, &elv_list, list) { |
Jens Axboe | 8ac0d9a | 2017-10-25 12:35:02 -0600 | [diff] [blame] | 1139 | if (elv && elevator_match(elv, __e->elevator_name) && |
| 1140 | (__e->uses_mq == uses_mq)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | len += sprintf(name+len, "[%s] ", elv->elevator_name); |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1142 | continue; |
| 1143 | } |
Ming Lei | 3a5088c | 2017-04-15 20:38:22 +0800 | [diff] [blame] | 1144 | if (__e->uses_mq && q->mq_ops && elv_support_iosched(q)) |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1145 | len += sprintf(name+len, "%s ", __e->elevator_name); |
| 1146 | else if (!__e->uses_mq && !q->mq_ops) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | len += sprintf(name+len, "%s ", __e->elevator_name); |
| 1148 | } |
Jens Axboe | 2a12dcd | 2007-04-26 14:41:53 +0200 | [diff] [blame] | 1149 | spin_unlock(&elv_list_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | |
Jens Axboe | bd166ef | 2017-01-17 06:03:22 -0700 | [diff] [blame] | 1151 | if (q->mq_ops && q->elevator) |
| 1152 | len += sprintf(name+len, "none"); |
| 1153 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | 	len += sprintf(name+len, "\n");
| 1155 | return len; |
| 1156 | } |
| 1157 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1158 | struct request *elv_rb_former_request(struct request_queue *q, |
| 1159 | struct request *rq) |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 1160 | { |
| 1161 | struct rb_node *rbprev = rb_prev(&rq->rb_node); |
| 1162 | |
| 1163 | if (rbprev) |
| 1164 | return rb_entry_rq(rbprev); |
| 1165 | |
| 1166 | return NULL; |
| 1167 | } |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 1168 | EXPORT_SYMBOL(elv_rb_former_request); |
| 1169 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1170 | struct request *elv_rb_latter_request(struct request_queue *q, |
| 1171 | struct request *rq) |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 1172 | { |
| 1173 | struct rb_node *rbnext = rb_next(&rq->rb_node); |
| 1174 | |
| 1175 | if (rbnext) |
| 1176 | return rb_entry_rq(rbnext); |
| 1177 | |
| 1178 | return NULL; |
| 1179 | } |
Jens Axboe | 2e662b6 | 2006-07-13 11:55:04 +0200 | [diff] [blame] | 1180 | EXPORT_SYMBOL(elv_rb_latter_request); |
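/*
 * A sketch of how a sector-sorted scheduler pairs the two helpers above with
 * elv_rb_find(); "example_next_in_order" is hypothetical. Schedulers normally
 * wire elv_rb_former_request/elv_rb_latter_request directly into their
 * elevator_ops as the former/latter request hooks.
 */
static struct request *example_next_in_order(struct request_queue *q,
					     struct rb_root *root,
					     sector_t sector)
{
	struct request *rq = elv_rb_find(root, sector);

	/* on an exact positional hit, continue with its rbtree successor */
	return rq ? elv_rb_latter_request(q, rq) : NULL;
}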