// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
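
/*
 * Illustrative sketch (assumption, not part of the original file): a driver
 * that receives a tag back from its hardware on completion would typically
 * look the request up and then release the tag, roughly:
 *
 *	struct request *rq = blk_queue_find_tag(q, hw_tag);
 *
 *	if (rq)
 *		blk_queue_end_tag(q, rq);	(with q->queue_lock held)
 *
 * where "hw_tag" is a hypothetical tag value reported by the device.
 */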

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Drops the reference count on @bqt and frees it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing on a device, yet leave the
 *    queue functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

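	/* round the depth up to a whole number of longs for the tag bitmap */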
	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth, int alloc_policy)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	tags->alloc_policy = alloc_policy;
	tags->next_tag = 0;
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
	return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
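
/*
 * Illustrative sketch (assumption, not part of the original file): a driver
 * that wants one tag space shared by several queues can allocate the map
 * once and hand it to blk_queue_init_tags() for each queue:
 *
 *	shared = blk_init_tags(64, BLK_TAG_ALLOC_FIFO);
 *	if (!shared)
 *		return -ENOMEM;
 *	blk_queue_init_tags(q1, 64, shared, BLK_TAG_ALLOC_FIFO);
 *	blk_queue_init_tags(q2, 64, shared, BLK_TAG_ALLOC_FIFO);
 *
 * Each queue takes its own reference on the map; the driver drops its
 * initial reference with blk_free_tags() when it is done with it.
 */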

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags, int alloc_policy)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth, alloc_policy);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
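
/*
 * Illustrative sketch (assumption, not part of the original file): a
 * single-queue driver that only needs a private tag map passes NULL for
 * @tags and has the map allocated on its behalf:
 *
 *	if (blk_queue_init_tags(q, queue_depth, NULL, BLK_TAG_ALLOC_FIFO))
 *		goto out_cleanup_queue;
 *
 * "queue_depth" and "out_cleanup_queue" are hypothetical; after a
 * successful call QUEUE_FLAG_QUEUED is set and blk_queue_start_tag() can
 * be used from the driver's request handling.
 */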

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If real_max_depth is already large enough, just adjust max_depth.
	 * *NOTE* requests with tag values between new_depth and
	 * real_max_depth may still be in flight, so the tag map cannot be
	 * shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
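
/*
 * Illustrative sketch (assumption, not part of the original file): resizing
 * an existing map must be done under the queue lock, e.g. when the device
 * reports a new supported depth:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	error = blk_queue_resize_tags(q, new_depth);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */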

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	lockdep_assert_held(q->queue_lock);

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->rq_flags &= ~RQF_QUEUED;
	rq->tag = -1;
	rq->internal_tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function. The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it needs to be restarted for some reason.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	lockdep_assert_held(q->queue_lock);

	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
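		/* too much async IO in flight already, make the caller retry later */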
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
			tag = find_first_zero_bit(bqt->tag_map, max_depth);
			if (tag >= max_depth)
				return 1;
		} else {
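			/*
			 * Round-robin (BLK_TAG_ALLOC_RR): search from the
			 * most recently allocated tag onwards, wrapping
			 * around to the start of the map if nothing is
			 * free towards the end.
			 */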
			int start = bqt->next_tag;
			int size = min_t(int, bqt->max_depth, max_depth + start);
			tag = find_next_zero_bit(bqt->tag_map, size, start);
			if (tag >= size && start + size > bqt->max_depth) {
				size = start + size - bqt->max_depth;
				tag = find_first_zero_bit(bqt->tag_map, size);
			}
			if (tag >= size)
				return 1;
		}

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	bqt->next_tag = (tag + 1) % bqt->max_depth;
	rq->rq_flags |= RQF_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
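
/*
 * Illustrative sketch (assumption, not part of the original file): a legacy
 * request_fn based driver typically pairs the two helpers like this, with
 * q->queue_lock held:
 *
 *	while ((rq = blk_peek_request(q)) != NULL) {
 *		if (blk_queue_start_tag(q, rq))
 *			break;			(out of tags, retry later)
 *		...issue rq to the hardware using rq->tag...
 *	}
 *
 * and then calls blk_queue_end_tag(q, rq) from its completion path before
 * finishing the request.
 */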

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    re-add all requests to the request queue in the right order.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
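
/*
 * Illustrative sketch (assumption, not part of the original file): an error
 * handler that resets the device could push everything back for a clean
 * restart:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */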