/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
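
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical driver's completion path mapping a tag reported by the
 * hardware back to the request that owns it.  The mydrv_* names and the
 * idea of the controller returning a tag in its status are assumptions
 * made for illustration only.
 *
 *	static void mydrv_handle_completion(struct request_queue *q, int hw_tag)
 *	{
 *		struct request *rq = blk_queue_find_tag(q, hw_tag);
 *
 *		if (!rq) {
 *			pr_err("mydrv: completion for unknown tag %d\n", hw_tag);
 *			return;
 *		}
 *		mydrv_finish_request(q, rq);
 *	}
 */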

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drop the reference count on @bqt and free it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);
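
/*
 * Example (illustrative sketch, not part of the original file): sharing one
 * externally allocated tag map across several queues, as described above.
 * Each successful blk_queue_init_tags() call takes its own reference on the
 * map, so the creator can drop its reference afterwards and the map is
 * freed when the last queue releases it.  MYDRV_DEPTH, MYDRV_NR_QUEUES and
 * the queues[] array are assumptions.
 *
 *	struct blk_queue_tag *shared;
 *	int i, err = 0;
 *
 *	shared = blk_init_tags(MYDRV_DEPTH);
 *	if (!shared)
 *		return -ENOMEM;
 *
 *	for (i = 0; i < MYDRV_NR_QUEUES; i++) {
 *		err = blk_queue_init_tags(queues[i], MYDRV_DEPTH, shared);
 *		if (err)
 *			break;
 *	}
 *
 *	blk_free_tags(shared);
 *	return err;
 */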

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave the
 *	queue functional.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the existing tag map to use, or %NULL to allocate a new one
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
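
/*
 * Example (illustrative sketch, not part of the original file): enabling
 * tagged queuing on a single queue during driver probe.  Passing a %NULL
 * tag map asks the block layer to allocate a private one; MYDRV_DEPTH and
 * the error label are assumptions.  blk_cleanup_queue() later releases the
 * map through __blk_queue_free_tags(), so no explicit teardown is needed
 * for this simple case.
 *
 *	if (blk_queue_init_tags(q, MYDRV_DEPTH, NULL))
 *		goto out_cleanup_queue;
 */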

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just adjust
	 * max_depth.  *NOTE* as requests with tag values between new_depth
	 * and real_max_depth can be in flight, the tag map cannot be shrunk
	 * blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently we cannot replace a shared tag map with a new one, so
	 * error out if this is the case.
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
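
/*
 * Example (illustrative sketch, not part of the original file): growing the
 * tag depth at runtime.  Per the note above, the queue lock must be held;
 * new_depth and the flags variable are assumptions.
 *
 *	unsigned long flags;
 *	int err;
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	err = blk_queue_resize_tags(q, new_depth);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */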

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
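
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * pairing of blk_queue_start_tag() in a driver's request_fn with
 * blk_queue_end_tag() in its completion path, both called with the queue
 * lock held.  The mydrv_* helpers and the hardware-issue step are
 * assumptions.  A non-zero return from blk_queue_start_tag() means no tag
 * was available; the request stays on the queue and dispatch is retried
 * when a tag is freed.
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq))
 *				break;
 *			mydrv_issue_to_hw(rq);
 *		}
 *	}
 *
 * On completion, still under the queue lock, release the tag before
 * finishing the request:
 *
 *	blk_queue_end_tag(q, rq);
 *	__blk_end_request_all(rq, error);
 */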

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
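
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical controller-reset path.  With the queue lock held, every
 * tagged request is pushed back onto the request queue so it is reissued
 * once the hardware is usable again; a driver may also stop the queue
 * around the reset with blk_stop_queue()/blk_start_queue().
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */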