#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct blk_mq_ctxmap	ctx_map;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	atomic_t		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
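
/*
 * Example (illustrative sketch; the "my_*" names are hypothetical): a
 * driver fills in one tag set and derives its request queue(s) from it
 * with blk_mq_alloc_tag_set() and blk_mq_init_queue(), declared below.
 *
 *	static struct blk_mq_tag_set my_set;
 *	struct request_queue *q;
 *
 *	my_set.ops		= &my_mq_ops;
 *	my_set.nr_hw_queues	= 1;
 *	my_set.queue_depth	= 64;
 *	my_set.numa_node	= NUMA_NO_NODE;
 *	my_set.cmd_size		= sizeof(struct my_cmd);
 *	my_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&my_set))
 *		return -ENOMEM;
 *	q = blk_mq_init_queue(&my_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_set);
 *		return PTR_ERR(q);
 *	}
 */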

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for
	 * setting up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
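
/*
 * Example (hedged sketch): ->queue_rq and ->map_queue are the callbacks
 * every driver provides; blk_mq_map_queue(), declared below, is the stock
 * mapping most single-queue drivers use. my_hw_submit() is hypothetical
 * and stands in for handing the request to the hardware.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (!my_hw_submit(hctx->driver_data, rq))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */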

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_SYSFS_UP	= 1 << 3,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
	 ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
	 << BLK_MQ_F_ALLOC_POLICY_START_BIT)
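
/*
 * Example: the tag allocation policy (a BLK_TAG_ALLOC_* value from
 * blkdev.h) is carried starting at bit 8 of the BLK_MQ_F_* flags word,
 * so a round-robin set would be encoded and later decoded as:
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);
 *	policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
 */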

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
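
/*
 * Example (illustrative): callers allocate a request directly from the
 * queue and must check for an ERR_PTR() return:
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */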

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
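
/*
 * Example: a unique tag packs the hardware queue index into the upper 16
 * bits and the per-queue tag into the lower 16, so both halves can be
 * recovered from the one u32:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq    = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag    = blk_mq_unique_tag_to_tag(unique);
 */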

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv);
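
/*
 * Example (sketch; my_count_inflight is hypothetical): a busy_iter_fn is
 * invoked for every busy tag on the hardware queue, e.g. to count the
 * requests that have actually been handed to the hardware:
 *
 *	static void my_count_inflight(struct blk_mq_hw_ctx *hctx,
 *				      struct request *rq, void *priv,
 *				      bool reserved)
 *	{
 *		unsigned int *count = priv;
 *
 *		if (blk_mq_request_started(rq))
 *			(*count)++;
 *	}
 *
 *	blk_mq_tag_busy_iter(hctx, my_count_inflight, &count);
 */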
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
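
/*
 * Example: the usual pattern for changing queue or device state safely is
 * to drain in-flight requests and block new ones first, then release:
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue or device state ...
 *	blk_mq_unfreeze_queue(q);
 */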

/*
 * Driver command data is immediately after the request, so subtract the
 * request size to get back to the original request, and add it to get
 * the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
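
/*
 * Example: with cmd_size set when the tag set was created, every request
 * carries a driver PDU right behind it (struct my_cmd is hypothetical):
 *
 *	set->cmd_size = sizeof(struct my_cmd);
 *	...
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */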

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
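
/*
 * Example: walking every hardware queue of a request queue, e.g. to stop
 * each one by hand:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		blk_mq_stop_hw_queue(hctx);
 */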

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
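
/*
 * Example: "sum" is evaluated once per software context with __x bound to
 * the current ctx (the counter field here is hypothetical, since struct
 * blk_mq_ctx is private to block/blk-mq.h):
 *
 *	unsigned int total = blk_ctx_sum(q, __x->my_counter);
 */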

#endif