#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
		unsigned long		state;		/* BLK_MQ_S_* flags */
	} ____cacheline_aligned_in_smp;

	struct work_struct	run_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct blk_mq_ctxmap	ctx_map;

	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx;

	atomic_t		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct delayed_work	delay_work;

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;

	unsigned long		poll_considered;
	unsigned long		poll_invoked;
	unsigned long		poll_success;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn			*poll;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for setting
	 * up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
	reinit_request_fn	*reinit_request;
};
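
/*
 * Illustrative sketch (not part of this header): a minimal driver-side ops
 * table.  All "mydrv_*" names are hypothetical; only the blk_mq_* symbols
 * come from this API.  ->queue_rq() issues one request and returns one of
 * the BLK_MQ_RQ_QUEUE_* codes defined below; ->map_queue() is typically
 * just blk_mq_map_queue().
 *
 *	static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				  const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (mydrv_hw_queue_full(hctx->driver_data))	(hypothetical check)
 *			return BLK_MQ_RQ_QUEUE_BUSY;		(blk-mq requeues rq)
 *
 *		blk_mq_start_request(rq);
 *		mydrv_issue(hctx->driver_data, rq);		(hypothetical submit)
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq	= mydrv_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */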

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
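
/*
 * Illustrative sketch: BLK_ALLOC_POLICY_TO_MQ_FLAG() packs a tag allocation
 * policy (BLK_TAG_ALLOC_FIFO / BLK_TAG_ALLOC_RR from <linux/blkdev.h>) into
 * the flag word, and BLK_MQ_FLAG_TO_ALLOC_POLICY() extracts it again:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 *	policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);   (== BLK_TAG_ALLOC_FIFO)
 */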

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
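
/*
 * Illustrative sketch of queue bring-up (error handling trimmed, "mydrv"
 * names hypothetical): fill in the tag set, allocate it, then create the
 * request_queue from it.
 *
 *	set->ops	 = &mydrv_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node	 = NUMA_NO_NODE;
 *	set->cmd_size	 = sizeof(struct mydrv_cmd);	(per-request PDU)
 *	set->flags	 = BLK_MQ_F_SHOULD_MERGE;
 *	set->driver_data = mydrv;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(set);	(or blk_mq_init_allocated_queue())
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */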

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	BLK_MQ_REQ_NOWAIT	= (1 << 0), /* return when out of requests */
	BLK_MQ_REQ_RESERVED	= (1 << 1), /* allocate from reserved pool */
};

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
		unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
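
/*
 * Illustrative sketch: allocating a request directly (e.g. for a driver
 * internal/passthrough command) instead of going through submit_bio().
 * With BLK_MQ_REQ_NOWAIT the allocation fails instead of sleeping when no
 * tag is available:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, READ, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */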

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
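
/*
 * Illustrative sketch: the "unique tag" packs the hardware queue index into
 * the upper 16 bits and the per-queue tag into the lower 16 bits, so both
 * can be recovered from a single u32 (useful to e.g. SCSI LLDs):
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq    = blk_mq_unique_tag_to_hwq(unique);	(hctx index)
 *	u16 tag    = blk_mq_unique_tag_to_tag(unique);	(tag within that hctx)
 */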

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
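
/*
 * Illustrative sketch of the resulting layout when ->cmd_size is set in the
 * tag set ("struct mydrv_cmd" is a hypothetical per-request PDU type):
 *
 *	[ struct request ][ cmd_size bytes of driver PDU ]
 *
 *	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2   = blk_mq_rq_from_pdu(cmd);	(rq2 == rq)
 */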

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
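
/*
 * Illustrative sketch: walking every hardware queue of a request_queue and
 * every software (per-cpu) context mapped to it:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	struct blk_mq_ctx *ctx;
 *	unsigned int i, j;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		hctx_for_each_ctx(hctx, ctx, j)
 *			mydrv_inspect(hctx, ctx);	(hypothetical)
 */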

#endif