/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

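/*
 * Return true if the regular tag space still has at least one free tag, or
 * if no tag set is attached at all.
 */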
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users have
 * already reserved budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
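/*
 * As an illustration of the bound computed in hctx_may_queue() below (the
 * numbers are made up for the example): with a bitmap depth of 128 and 4
 * active shared users, each hctx may have at most
 * max((128 + 4 - 1) / 4, 4U) = 32 tags in flight; with 64 active users the
 * 4U floor still guarantees each queue at least 4 tags.
 */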
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

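/*
 * Allocate a tag for the request described by data. Reserved allocations are
 * served from breserved_tags, everything else from bitmap_tags with the
 * result offset by nr_reserved_tags. If no tag is free and BLK_MQ_REQ_NOWAIT
 * is not set, sleep until one becomes available; otherwise return
 * BLK_MQ_TAG_FAIL.
 */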
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
        bool drop_ctx;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        drop_ctx = data->ctx == NULL;
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                prepare_to_wait_exclusive(&ws->wait, &wait,
                                          TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                if (data->ctx)
                        blk_mq_put_ctx(data->ctx);

                bt_prev = bt;
                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                finish_wait(&ws->wait, &wait);

                /*
                 * If the destination hw queue changed while we slept, issue
                 * a fake wakeup on the previous queue to compensate for the
                 * missed wakeup, so other allocations on the previous queue
                 * won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        if (drop_ctx && data->ctx)
                blk_mq_put_ctx(data->ctx);

        finish_wait(&ws->wait, &wait);

found_tag:
        return tag + tag_offset;
}

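/*
 * Release a tag back to the bitmap it was allocated from: reserved tags go
 * straight back to breserved_tags, regular tags back to bitmap_tags after
 * subtracting the reserved offset. ctx->cpu is passed to the sbitmap as a
 * CPU hint for future allocations.
 */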
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

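/*
 * Iteration context handed to bt_iter() via sbitmap_for_each_set(): the hctx
 * being walked, the caller's callback and cookie, and whether the reserved
 * tag set is being iterated.
 */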
struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq && blk_mq_request_started(rq))
                iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

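/*
 * Iterate over every started request on every hardware queue of a tag set
 * and invoke fn on it. Drivers typically use this during error recovery to
 * fail or requeue outstanding requests. A hypothetical callback (name and
 * error handling are illustrative only, not part of this file) might look
 * like:
 *
 *      static void my_cancel_rq(struct request *rq, void *priv, bool reserved)
 *      {
 *              blk_mq_end_request(rq, BLK_STS_IOERR);
 *      }
 *
 *      blk_mq_tagset_busy_iter(&driver->tag_set, my_cancel_rq, NULL);
 */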
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

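/*
 * Like blk_mq_tagset_busy_iter(), but walk the requests of a single request
 * queue and pass the owning hctx to the callback as well. The q_usage_counter
 * reference taken below keeps the hctx table stable while iterating.
 */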
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and
         * queue_hw_ctx after freezing the queue, so grab a reference on
         * q_usage_counter to avoid racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check.
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

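/*
 * Allocate a blk_mq_tags structure with total_tags tags, reserved_tags of
 * which live in the separate breserved_tags bitmap. alloc_policy selects the
 * sbitmap allocation mode; BLK_TAG_ALLOC_RR enables strict round-robin tag
 * reuse.
 */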
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

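/*
 * Change the tag depth of one hardware queue. Shrinking, or growing within
 * the originally allocated nr_tags, only resizes the regular bitmap; growing
 * past nr_tags requires can_grow and allocates a complete new rq map before
 * freeing the old one. Reserved tags are never resized.
 */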
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                          tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                     tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
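/*
 * The inverse helpers blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() from the blk-mq header split the value back
 * apart. A rough usage sketch (variable names are illustrative, not from
 * this file):
 *
 *      u32 unique = blk_mq_unique_tag(rq);
 *      u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *      u16 tag = blk_mq_unique_tag_to_tag(unique);
 */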