/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

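/*
 * Rough sketch of the layout used below (the structures themselves live in
 * blk-mq-tag.h / blk-mq.h): a blk_mq_bitmap_tags carves the tag space into
 * bt->map_nr words, each holding up to (1 << bt->bits_per_word) tags and
 * padded out to its own cacheline, plus an array of BT_WAIT_QUEUES wait
 * queues used for the rolling wakeups described above.
 */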
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

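/*
 * The wake/wait queue index wraps with a mask rather than a modulo, which
 * assumes BT_WAIT_QUEUES (see blk-mq-tag.h) is a power of two.
 */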
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	/*
	 * Make sure all changes prior to this are visible from other CPUs.
	 */
	smp_mb();
	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}

	if (include_reserve) {
		bt = &tags->breserved_tags;
		if (waitqueue_active(&bt->bs[0].wait))
			wake_up(&bt->bs[0].wait);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
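/*
 * Roughly: each active queue may use ceil(bt->depth / active_queues) tags,
 * but never fewer than 4. With illustrative numbers, a depth of 128 shared
 * by 3 active queues caps each hctx at max((128 + 2) / 3, 4U) == 43
 * in-flight requests.
 */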
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

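/*
 * Scan a single bitmap word for a free bit, starting at last_tag, and claim
 * it with test_and_set_bit(). If the scan started at a non-zero offset it
 * wraps back to bit 0 once, unless nowrap is set (round-robin allocation),
 * in which case tags are only handed out in increasing order.
 */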
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
			 bool nowrap)
{
	int tag, org_last_tag = last_tag;

	while (1) {
		tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
		if (unlikely(tag >= bm->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag && !nowrap) {
				last_tag = org_last_tag = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(tag, &bm->word))
			break;

		last_tag = tag + 1;
		if (last_tag >= bm->depth - 1)
			last_tag = 0;
	}

	return tag;
}

#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
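/*
 * TAG_TO_INDEX() and TAG_TO_BIT() (from blk-mq-tag.h) split a tag value into
 * the word that holds it and the bit offset inside that word, so the search
 * below can resume exactly where the cached last_tag left off.
 */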
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
				    BT_ALLOC_RR(tags));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		/*
		 * Jump to next index, and reset the last tag to be the
		 * first tag of that index
		 */
		index++;
		last_tag = (index << bt->bits_per_word);

		if (index >= bt->map_nr) {
			index = 0;
			last_tag = 0;
		}
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

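/*
 * Pick a wait queue for a sleeper. Reserved-tag allocations have no hctx and
 * always use bs[0]; otherwise each hctx rotates through the BT_WAIT_QUEUES
 * entries via its wait_index, spreading sleepers across the wait queues.
 */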
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

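/*
 * Slow path: keep retrying the bitmap, kicking the hardware queue to push out
 * pending IO, and sleeping on a wait queue in between attempts. After waking
 * up the task may have migrated CPUs, so ctx/hctx (and with them the bitmap
 * and last_tag cache) are re-resolved before the next attempt.
 */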
static int bt_get(struct blk_mq_alloc_data *data,
		  struct blk_mq_bitmap_tags *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag, struct blk_mq_tags *tags)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag, tags);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt, last_tag, tags);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
							data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     &data->ctx->last_tag, data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}

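/*
 * Find the first wait queue with sleepers, starting from wake_index, and
 * opportunistically nudge wake_index towards it so later frees start their
 * search there as well.
 */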
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

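/*
 * Freeing a tag clears its bit and then applies the "rolling wakeup": each
 * free decrements the current wait queue's wait_cnt, and only when that hits
 * zero is the batch recharged with bt->wake_cnt, the wake index advanced and
 * that one wait queue woken. This avoids waking every sleeper on every free.
 */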
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		bt_clear_tag(&tags->bitmap_tags, real_tag);
		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
			*last_tag = real_tag;
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		bt_clear_tag(&tags->breserved_tags, tag);
	}
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = hctx->tags->rqs[off + bit];
			if (rq->q == hctx->queue)
				fn(hctx, rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

static void bt_tags_for_each(struct blk_mq_tags *tags,
		struct blk_mq_bitmap_tags *bt, unsigned int off,
		busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct request *rq;
	int bit, i;

	if (!tags->rqs)
		return;
	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		for (bit = find_first_bit(&bm->word, bm->depth);
		     bit < bm->depth;
		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
			rq = tags->rqs[off + bit];
			fn(rq, data, reserved);
		}

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
			false);
	}
}

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

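/*
 * Spread a (possibly new) depth across the per-word maps and re-size the
 * wakeup batch: wake_cnt starts at BT_WAIT_BATCH and is scaled down (to at
 * least 1) for small depths. Assuming the values of 8 for BT_WAIT_BATCH and
 * BT_WAIT_QUEUES from blk-mq-tag.h, a depth of 16 yields a batch of two
 * frees per wakeup.
 */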
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

	bt->depth = depth;
}

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
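		/*
		 * For example (illustrative numbers): depth == 32 on a
		 * 64-bit machine shrinks tags_per_word from 64 down to 8,
		 * ending up with four 8-tag words, each on its own
		 * cacheline.
		 */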
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		bt->map = NULL;
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	tags->alloc_policy = alloc_policy;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
		kfree(tags);
		return NULL;
	}

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	free_cpumask_var(tags->cpumask);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}