/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t lock;
		struct list_head rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;
	unsigned int index_hw;

	/* incremented at dispatch time */
	unsigned long rq_dispatched[2];
	unsigned long rq_merged;

	/* incremented at completion time */
	unsigned long ____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue *queue;
	struct kobject kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate.  The lower two bits carry the MQ_RQ_* state
 * value and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE = 0,
	MQ_RQ_IN_FLIGHT = 1,
	MQ_RQ_COMPLETE = 2,

	MQ_RQ_STATE_BITS = 2,
	MQ_RQ_STATE_MASK = (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC = 1 << MQ_RQ_STATE_BITS,
};
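
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream
 * API): given the layout above, the generation number is everything
 * above the two state bits.
 */
static inline u64 __blk_mq_gstate_gen(u64 gstate)
{
	return gstate >> MQ_RQ_STATE_BITS;
}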

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);

/* Used by blk_insert_cloned_request() to issue a request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
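
/*
 * Usage sketch (hypothetical helper, not upstream code): the hardware
 * queue serving a request is found by mapping the CPU of its software
 * queue, the same pattern blk_mq_put_driver_tag() uses below.
 */
static inline struct blk_mq_hw_ctx *blk_mq_example_rq_to_hctx(struct request *rq)
{
	return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
}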

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE, and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}
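
/*
 * Worked example (illustrative only): if rq->gstate holds
 * (3 * MQ_RQ_GEN_INC) | MQ_RQ_IDLE, then
 * blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT) stores
 * (4 * MQ_RQ_GEN_INC) | MQ_RQ_IN_FLIGHT: the state bits change and the
 * generation above them is bumped by exactly one, which lets a stale
 * observer detect that the request has since been recycled.
 */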

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance.  For now this is hardcoded as-is.  Note that we don't
 * care about preemption, since we know the ctx's are persistent.  This
 * does mean that we can't rely on the ctx always matching the currently
 * running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
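
/*
 * Usage sketch (hypothetical caller, not upstream code):
 * blk_mq_get_ctx() disables preemption via get_cpu(), so every call
 * must be paired with blk_mq_put_ctx() once the ctx is no longer
 * needed.
 */
static inline unsigned long blk_mq_example_merged(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	unsigned long merged = ctx->rq_merged;

	blk_mq_put_ctx(ctx);
	return merged;
}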

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameters */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};
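
/*
 * Usage sketch (hypothetical, mirroring how the allocation paths fill
 * this in): callers set the input fields and leave ctx/hctx for the
 * allocator to pick, e.g.
 *
 *	struct blk_mq_alloc_data data = { .q = q, .flags = 0 };
 */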

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
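
/*
 * Usage sketch (hypothetical helper, not upstream code): the dispatch
 * path pairs these budget helpers roughly like this: take the budget
 * before issuing a request, and give it back if the issue is
 * abandoned, e.g. because no driver tag could be obtained.
 */
static inline bool blk_mq_example_issue(struct blk_mq_hw_ctx *hctx,
					struct request *rq)
{
	if (!blk_mq_get_dispatch_budget(hctx))
		return false;		/* driver out of resources */

	if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
		/* no tag; return the budget and retry later */
		blk_mq_put_dispatch_budget(hctx);
		return false;
	}

	/* ... hand @rq to the driver's ->queue_rq() here ... */
	return true;
}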

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif