#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

#include "blk-mq.h"

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;		/* total number of tags */
	unsigned int nr_reserved_tags;	/* tags set aside for reserved allocations */

	atomic_t active_queues;		/* queues marked active on a shared map */

	struct sbitmap_queue bitmap_tags;	/* normal tag space */
	struct sbitmap_queue breserved_tags;	/* reserved tag space */

	struct request **rqs;		/* tag -> request lookup table */
	struct list_head page_list;	/* pages backing the preallocated requests */

	int alloc_policy;		/* BLK_TAG_ALLOC_FIFO or BLK_TAG_ALLOC_RR */
	cpumask_var_t cpumask;
};


extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
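/*
 * A minimal lifecycle sketch (illustrative only, not taken from a real
 * caller): the map is sized up front and freed as a whole. The depth,
 * node and policy values below are made-up placeholders:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE,
 *				BLK_TAG_ALLOC_FIFO);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_tags(tags);
 */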

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv);

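/*
 * blk_mq_queue_tag_busy_iter() walks the in-flight requests of a queue
 * and invokes the callback once per request. A hypothetical callback,
 * assuming the busy_iter_fn signature of this kernel era:
 *
 *	static void count_inflight(struct blk_mq_hw_ctx *hctx,
 *				   struct request *rq, void *priv,
 *				   bool reserved)
 *	{
 *		unsigned int *count = priv;
 *
 *		(*count)++;
 *	}
 *
 *	blk_mq_queue_tag_busy_iter(q, count_inflight, &count);
 */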
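/*
 * Pick the wait queue to sleep on when waiting for a tag. Without a
 * hardware context the first wait queue is used; otherwise
 * sbq_wait_ptr() rotates through the sbitmap_queue's wait queues via
 * hctx->wait_index, spreading waiters to reduce wakeup contention.
 */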
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

enum {
	BLK_MQ_TAG_CACHE_MIN	= 1,
	BLK_MQ_TAG_CACHE_MAX	= 64,
};

enum {
	BLK_MQ_TAG_FAIL		= -1U,
	BLK_MQ_TAG_MIN		= BLK_MQ_TAG_CACHE_MIN,
	BLK_MQ_TAG_MAX		= BLK_MQ_TAG_FAIL - 1,
};
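/*
 * Tag allocation failures are signalled in-band rather than with an
 * errno: blk_mq_get_tag() hands back BLK_MQ_TAG_FAIL (all bits set),
 * which is why BLK_MQ_TAG_MAX stops one below it. A minimal caller
 * sketch, assuming an already set up blk_mq_alloc_data:
 *
 *	unsigned int tag = blk_mq_get_tag(data);
 *
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return NULL;
 */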

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

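/*
 * blk_mq_tag_busy() and blk_mq_tag_idle() only matter when the tag map
 * is shared between queues (BLK_MQ_F_TAG_SHARED): they mark a hardware
 * queue active or idle so active_queues can be used to divide the tag
 * space fairly among the busy queues. On unshared maps they are no-ops.
 */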
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}

/*
 * This helper should only be used by the flush request to share its
 * tag with the request it was cloned from; the two requests are never
 * in flight at the same time. The caller has to make sure the tag
 * can't be freed while it is in use.
 */
static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
		unsigned int tag, struct request *rq)
{
	hctx->tags->rqs[tag] = rq;
}

#endif