/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

static inline void bt_index_inc(unsigned int *index)
{
	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}
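
/*
 * Note: the mask-based wrap in bt_index_inc() assumes BT_WAIT_QUEUES
 * (defined in blk-mq-tag.h) is a power of two; e.g. with BT_WAIT_QUEUES
 * equal to 8, an index of 7 advances to (7 + 1) & 7 == 0.
 */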

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		bt_index_inc(&wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
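
/*
 * Worked example of the fair-share math above: with bt->depth == 128 and
 * 3 active queues, each queue may use up to max((128 + 3 - 1) / 3, 4U)
 * == 43 tags; with 64 active queues the 4U floor applies, so every queue
 * is still allowed at least 4 tags.
 */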

static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
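
/*
 * Illustration of the tag <-> word mapping used above, assuming the
 * TAG_TO_INDEX()/TAG_TO_BIT() helpers from blk-mq-tag.h split a tag on
 * bt->bits_per_word: with bits_per_word == 5 (32 tags per word), tag 70
 * maps to bit 70 & 31 == 6 of word 70 >> 5 == 2, and a free bit 6 found
 * in word 2 is reassembled as 6 + (2 << 5) == 70.
 */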

static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;

	if (!hctx)
		return &bt->bs[0];

	bs = &bt->bs[hctx->wait_index];
	bt_index_inc(&hctx->wait_index);
	return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
		  struct blk_mq_bitmap_tags *bt,
		  struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		bool was_empty;

		was_empty = list_empty(&wait.task_list);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		if (was_empty)
			atomic_set(&bs->wait_cnt, bt->wake_cnt);

		blk_mq_put_ctx(data->ctx);

		io_schedule();

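		/*
		 * We may have been migrated to a different CPU while
		 * sleeping, so re-resolve the software and hardware
		 * contexts (and with them the tag map and per-cpu tag
		 * cache) before retrying the allocation.
		 */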
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}
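
/*
 * In the tag space exposed to callers, reserved tags occupy values
 * 0..nr_reserved_tags - 1 and normal tags start at nr_reserved_tags;
 * internally each class has its own bitmap. That is why the getters
 * above add the reserved offset and blk_mq_put_tag() below subtracts it.
 */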

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			if (wake_index != bt->wake_index)
				bt->wake_index = wake_index;

			return bs;
		}

		bt_index_inc(&wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;

	/*
	 * The unlock memory barrier needs to order accesses to the request
	 * in the free path against the clearing of the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

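	/*
	 * Rolling wakeups: instead of waking a sleeper on every free, wake
	 * one wait queue only when its batch counter (wait_cnt, seeded from
	 * bt->wake_cnt) counts down to zero, then advance wake_index so the
	 * next batch targets a different queue.
	 */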
	bs = bt_wake_ptr(bt);
	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
		atomic_set(&bs->wait_cnt, bt->wake_cnt);
		bt_index_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}

static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
		void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}
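
/*
 * The wake_cnt logic above amounts to
 * wake_cnt = min(BT_WAIT_BATCH, max(1U, depth / 4)): e.g. a depth of 8
 * yields a batch of 2 (assuming BT_WAIT_BATCH, from blk-mq-tag.h, is
 * larger than 2), while a depth of 1 falls back to a batch of 1.
 */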

static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
			int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If there are fewer than 4 tags, just forget about it,
		 * it's not going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
						GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	for (i = 0; i < BT_WAIT_QUEUES; i++)
		init_waitqueue_head(&bt->bs[i].wait);

	bt_update_count(bt, depth);
	return 0;
}
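
/*
 * Sizing example for bt_alloc() above, assuming BITS_PER_LONG == 64: a
 * depth of 32 shrinks bits_per_word from 6 down to 3 (8 tags per word,
 * the first size where tags_per_word * 4 > depth no longer holds), so
 * ALIGN(32, 8) / 8 == 4 separate words are allocated rather than one
 * fully packed word.
 */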

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update the reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}