/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(request_queue_t *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
static struct kmem_cache *requestq_cachep;

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

unsigned long blk_max_low_pfn, blk_max_pfn;

EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

static void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
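
/*
 * Illustrative note (not from the original source): for the default queue
 * depth of BLKDEV_MAX_RQ = 128 requests, the formulas above yield
 * nr_congestion_on  = 128 - 16 + 1     = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103, i.e. roughly a ten-request
 * hysteresis band between signalling and clearing congestion.
 */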

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	request_queue_t *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 *
 */
void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}

EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}

EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}

EXPORT_SYMBOL(blk_queue_softirq_done);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	INIT_WORK(&q->unplug_work, blk_unplug_work);

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}

EXPORT_SYMBOL(blk_queue_make_request);
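
/*
 * Illustrative sketch (not part of the original file): a virtual, bio-based
 * driver of this era would typically pair blk_queue_make_request() with
 * blk_alloc_queue() rather than blk_init_queue().  All "example_" names
 * below are hypothetical.
 *
 *	static int example_make_request(request_queue_t *q, struct bio *bio)
 *	{
 *		// remap or complete the bio directly, bypassing the request queue
 *		bio_endio(bio, bio->bi_size, 0);
 *		return 0;
 *	}
 *
 *	static int example_setup(struct example_dev *dev)
 *	{
 *		dev->queue = blk_alloc_queue(GFP_KERNEL);
 *		if (!dev->queue)
 *			return -ENOMEM;
 *		blk_queue_make_request(dev->queue, example_make_request);
 *		blk_queue_bounce_limit(dev->queue, BLK_BOUNCE_ANY);
 *		return 0;
 *	}
 */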

static void rq_init(request_queue_t *q, struct request *rq)
{
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->donelist);

	rq->errors = 0;
	rq->bio = rq->biotail = NULL;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->ioprio = 0;
	rq->buffer = NULL;
	rq->ref_count = 1;
	rq->q = q;
	rq->special = NULL;
	rq->data_len = 0;
	rq->data = NULL;
	rq->nr_phys_segments = 0;
	rq->sense = NULL;
	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->completion_data = NULL;
}

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(request_queue_t *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
	    prepare_flush_fn == NULL) {
		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
		return -EINVAL;
	}

	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}

EXPORT_SYMBOL(blk_queue_ordered);
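
/*
 * Illustrative sketch (not from the original source): a driver for a disk
 * with a write-back cache would typically advertise flush-based ordering
 * like this; the "example_" names are hypothetical.
 *
 *	static void example_prepare_flush(request_queue_t *q, struct request *rq)
 *	{
 *		memset(rq->cmd, 0, sizeof(rq->cmd));
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		rq->timeout = 60 * HZ;
 *		rq->cmd[0] = SYNCHRONIZE_CACHE;
 *		rq->cmd_len = 10;
 *	}
 *
 *	// during queue setup:
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, example_prepare_flush);
 */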

/**
 * blk_queue_issue_flush_fn - set function for issuing a flush
 * @q:     the request queue
 * @iff:   the function to be called issuing the flush
 *
 * Description:
 *   If a driver supports issuing a flush command, the support is notified
 *   to the block layer by defining it through this call.
 *
 **/
void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
{
	q->issue_flush_fn = iff;
}

EXPORT_SYMBOL(blk_queue_issue_flush_fn);

/*
 * Cache flushing for ordered writes handling
 */
inline unsigned blk_ordered_cur_seq(request_queue_t *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
	request_queue_t *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
{
	struct request *rq;
	int uptodate;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return;

	/*
	 * Okay, sequence complete.
	 */
	rq = q->orig_bar_rq;
	uptodate = q->orderr ? q->orderr : 1;

	q->ordseq = 0;

	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq, uptodate);
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(request_queue_t *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	rq->cmd_flags = REQ_HARDBARRIER;
	rq_init(q, rq);
	rq->elevator_private = NULL;
	rq->elevator_private2 = NULL;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline struct request *start_ordered(request_queue_t *q,
					    struct request *rq)
{
	q->bi_size = 0;
	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * Prep proxy barrier request.
	 */
	blkdev_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = &q->bar_rq;
	rq->cmd_flags = 0;
	rq_init(q, rq);
	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
		rq->cmd_flags |= REQ_RW;
	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
	rq->elevator_private = NULL;
	rq->elevator_private2 = NULL;
	init_request_from_bio(rq, q->orig_bar_rq->bio);
	rq->end_io = bar_end_io;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
	else
		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
	else
		rq = NULL;

	return rq;
}

int blk_do_ordered(request_queue_t *q, struct request **rqp)
{
	struct request *rq = *rqp;
	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return 1;

		if (q->next_ordered != QUEUE_ORDERED_NONE) {
			*rqp = start_ordered(q, rq);
			return 1;
		} else {
			/*
			 * This can happen when the queue switches to
			 * ORDERED_NONE while this request is on it.
			 */
			blkdev_dequeue_request(rq);
			end_that_request_first(rq, -EOPNOTSUPP,
					       rq->hard_nr_sectors);
			end_that_request_last(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return 0;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return 1;

	if (q->ordered & QUEUE_ORDERED_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return 1;
}

static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
	request_queue_t *q = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	/*
	 * This is a dry run; restore bio_sector and size.  We'll finish
	 * this request again with the original bi_end_io after an
	 * error occurs or post flush is complete.
	 */
	q->bi_size += bytes;

	if (bio->bi_size)
		return 1;

	/* Rewind bvec's */
	bio->bi_idx = 0;
	bio_for_each_segment(bvec, bio, i) {
		bvec->bv_len += bvec->bv_offset;
		bvec->bv_offset = 0;
	}

	/* Reset bio */
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio->bi_size = q->bi_size;
	bio->bi_sector -= (q->bi_size >> 9);
	q->bi_size = 0;

	return 0;
}

static int ordered_bio_endio(struct request *rq, struct bio *bio,
			     unsigned int nbytes, int error)
{
	request_queue_t *q = rq->q;
	bio_end_io_t *endio;
	void *private;

	if (&q->bar_rq != rq)
		return 0;

	/*
	 * Okay, this is the barrier request in progress, dry finish it.
	 */
	if (error && !q->orderr)
		q->orderr = error;

	endio = bio->bi_end_io;
	private = bio->bi_private;
	bio->bi_end_io = flush_dry_bio_endio;
	bio->bi_private = q;

	bio_endio(bio, nbytes, error);

	bio->bi_end_io = endio;
	bio->bi_private = private;

	return 1;
}

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/* Assume anything <= 4GB can be handled by IOMMU.
	   Actually some IOMMUs can handle everything, but I don't
	   know of a way to test this here. */
	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (bounce_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = bounce_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = bounce_pfn;
	}
}

EXPORT_SYMBOL(blk_queue_bounce_limit);
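
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware can only DMA to low memory would typically set its bounce
 * limit right after allocating the queue; the "example_" names are
 * hypothetical.
 *
 *	q = blk_init_queue(example_request_fn, &example_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	// 24-bit ISA DMA engine
 *	// or, for a 32-bit DMA engine:
 *	blk_queue_bounce_limit(q, 0xffffffffULL);
 */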

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}

EXPORT_SYMBOL(blk_queue_max_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_phys_segments = max_segments;
}

EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_hw_segments = max_segments;
}

EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
	}

	q->max_segment_size = max_size;
}

EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on (possible without reverting to
 *   even internal read-modify-write operations). Usually the default
 *   of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
{
	q->hardsect_size = size;
}

EXPORT_SYMBOL(blk_queue_hardsect_size);

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);

	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
}

EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
	}

	q->seg_boundary_mask = mask;
}

EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(request_queue_t *q, int mask)
{
	q->dma_alignment = mask;
}

EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(request_queue_t *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}

EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt@.  Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(bqt->busy);
		BUG_ON(!list_empty(&bqt->busy_list));

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);

	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
static void __blk_queue_free_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}


/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt@, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	queue in function.
 **/
void blk_queue_free_tags(request_queue_t *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}

EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __FUNCTION__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	INIT_LIST_HEAD(&tags->busy_list);
	tags->busy = 0;
	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 **/
int blk_queue_init_tags(request_queue_t *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		if ((rc = blk_queue_resize_tags(q, depth)))
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}

EXPORT_SYMBOL(blk_queue_init_tags);
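
/*
 * Illustrative sketch (not part of the original file): a TCQ-capable driver
 * would typically enable tagging once at probe time and then drive it with
 * blk_queue_start_tag()/blk_queue_end_tag() from its request handling; the
 * depth of 64 here is hypothetical.
 *
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		printk(KERN_WARNING "example: tagged queuing disabled\n");
 */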

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(request_queue_t *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have a large enough real_max_depth, just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}

EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __FUNCTION__, tag);
		return;
	}

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __FUNCTION__, tag);

	bqt->tag_index[tag] = NULL;
	bqt->busy--;
}

EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __FUNCTION__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit(tag, bqt->tag_map));

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &bqt->busy_list);
	bqt->busy++;
	return 0;
}

EXPORT_SYMBOL(blk_queue_start_tag);
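
/*
 * Illustrative sketch (not part of the original file): the usual pattern in
 * a TCQ-aware request_fn and its completion path; the "example_" names are
 * hypothetical and error handling is omitted.
 *
 *	static void example_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq))
 *				break;	// out of tags, retry on next completion
 *			example_issue_to_hw(rq);	// hardware now owns rq->tag
 *		}
 *	}
 *
 *	// later, in the completion handler (queue lock held):
 *	//	blk_queue_end_tag(q, rq);
 *	//	end_that_request_last(rq, uptodate);
 */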

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct list_head *tmp, *n;
	struct request *rq;

	list_for_each_safe(tmp, n, &bqt->busy_list) {
		rq = list_entry_rq(tmp);

		if (rq->tag == -1) {
			printk(KERN_ERR
			       "%s: bad tag found on list\n", __FUNCTION__);
			list_del_init(&rq->queuelist);
			rq->cmd_flags &= ~REQ_QUEUED;
		} else
			blk_queue_end_tag(q, rq);

		rq->cmd_flags &= ~REQ_STARTED;
		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	}
}

EXPORT_SYMBOL(blk_queue_invalidate_tags);

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk("%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
						 rq->nr_sectors,
						 rq->current_nr_sectors);
	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);

	if (blk_pc_request(rq)) {
		printk("cdb: ");
		for (bit = 0; bit < sizeof(rq->cmd); bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}

EXPORT_SYMBOL(blk_dump_rq_flags);

void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
	int high, highprv = 1;

	if (unlikely(!bio->bi_io_vec))
		return;

	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
	hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
	bio_for_each_segment(bv, bio, i) {
		/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
		 */
		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
		if (high || highprv)
			goto new_hw_segment;
		if (cluster) {
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;
			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
				goto new_hw_segment;

			seg_size += bv->bv_len;
			hw_seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
			hw_seg_size += bv->bv_len;
		} else {
new_hw_segment:
			if (hw_seg_size > bio->bi_hw_front_size)
				bio->bi_hw_front_size = hw_seg_size;
			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
			nr_hw_segs++;
		}

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
		highprv = high;
	}
	if (hw_seg_size > bio->bi_hw_back_size)
		bio->bi_hw_back_size = hw_seg_size;
	if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
		bio->bi_hw_front_size = hw_seg_size;
	bio->bi_phys_segments = nr_phys_segs;
	bio->bi_hw_segments = nr_hw_segs;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
				 struct bio *nxt)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	return 1;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
	struct bio_vec *bvec, *bvprv;
	struct bio *bio;
	int nsegs, i, cluster;

	nsegs = 0;
	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	rq_for_each_bio(bio, rq) {
		/*
		 * for each segment in bio
		 */
		bio_for_each_segment(bvec, bio, i) {
			int nbytes = bvec->bv_len;

			if (bvprv && cluster) {
				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
					goto new_segment;

				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
					goto new_segment;

				sg[nsegs - 1].length += nbytes;
			} else {
new_segment:
				memset(&sg[nsegs],0,sizeof(struct scatterlist));
				sg[nsegs].page = bvec->bv_page;
				sg[nsegs].length = nbytes;
				sg[nsegs].offset = bvec->bv_offset;

				nsegs++;
			}
			bvprv = bvec;
		} /* segments in bio */
	} /* bios in rq */

	return nsegs;
}

EXPORT_SYMBOL(blk_rq_map_sg);
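
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * calls this from its request handling once it knows the upper bound on
 * segments; the "example"/EXAMPLE_ names are hypothetical.
 *
 *	struct scatterlist sg[EXAMPLE_MAX_SEGMENTS];	// >= rq->nr_phys_segments
 *	int count;
 *
 *	count = blk_rq_map_sg(q, rq, sg);
 *	count = dma_map_sg(dev, sg, count, rq_data_dir(rq) == WRITE ?
 *			   DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *	// program "count" descriptors from sg[] into the controller
 */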

/*
 * the standard queue merge functions, can be overridden with device
 * specific ones if so desired
 */

static inline int ll_new_mergeable(request_queue_t *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

static inline int ll_new_hw_segment(request_queue_t *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
		blk_recount_segments(q, req->biotail);
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (req->nr_hw_segments == 1)
				req->bio->bi_hw_front_size = len;
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}
EXPORT_SYMBOL(ll_back_merge_fn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
1446static int ll_front_merge_fn(request_queue_t *q, struct request *req,
1447 struct bio *bio)
1448{
Mike Christiedefd94b2005-12-05 02:37:06 -06001449 unsigned short max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 int len;
1451
Mike Christiedefd94b2005-12-05 02:37:06 -06001452 if (unlikely(blk_pc_request(req)))
1453 max_sectors = q->max_hw_sectors;
1454 else
1455 max_sectors = q->max_sectors;
1456
1457
1458 if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
Jens Axboe4aff5e22006-08-10 08:44:47 +02001459 req->cmd_flags |= REQ_NOMERGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 if (req == q->last_merge)
1461 q->last_merge = NULL;
1462 return 0;
1463 }
1464 len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
1465 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1466 blk_recount_segments(q, bio);
1467 if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
1468 blk_recount_segments(q, req->bio);
1469 if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
1470 !BIOVEC_VIRT_OVERSIZE(len)) {
1471 int mergeable = ll_new_mergeable(q, req, bio);
1472
1473 if (mergeable) {
1474 if (bio->bi_hw_segments == 1)
1475 bio->bi_hw_front_size = len;
1476 if (req->nr_hw_segments == 1)
1477 req->biotail->bi_hw_back_size = len;
1478 }
1479 return mergeable;
1480 }
1481
1482 return ll_new_hw_segment(q, req, bio);
1483}
1484
1485static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
1486 struct request *next)
1487{
Nikita Danilovdfa1a552005-06-25 14:59:20 -07001488 int total_phys_segments;
1489 int total_hw_segments;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
1491 /*
1492 * First check if the either of the requests are re-queued
1493 * requests. Can't merge them if they are.
1494 */
1495 if (req->special || next->special)
1496 return 0;
1497
1498 /*
Nikita Danilovdfa1a552005-06-25 14:59:20 -07001499 * Will it become too large?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 */
1501 if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
1502 return 0;
1503
1504 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
1505 if (blk_phys_contig_segment(q, req->biotail, next->bio))
1506 total_phys_segments--;
1507
1508 if (total_phys_segments > q->max_phys_segments)
1509 return 0;
1510
1511 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
1512 if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
1513 int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
1514 /*
1515 * propagate the combined length to the end of the requests
1516 */
1517 if (req->nr_hw_segments == 1)
1518 req->bio->bi_hw_front_size = len;
1519 if (next->nr_hw_segments == 1)
1520 next->biotail->bi_hw_back_size = len;
1521 total_hw_segments--;
1522 }
1523
1524 if (total_hw_segments > q->max_hw_segments)
1525 return 0;
1526
1527 /* Merge is OK... */
1528 req->nr_phys_segments = total_phys_segments;
1529 req->nr_hw_segments = total_hw_segments;
1530 return 1;
1531}
1532
1533/*
1534 * "plug" the device if there are no outstanding requests: this will
1535 * force the transfer to start only after we have put all the requests
1536 * on the list.
1537 *
1538 * This is called with interrupts off and no requests on the queue and
1539 * with the queue lock held.
1540 */
1541void blk_plug_device(request_queue_t *q)
1542{
1543 WARN_ON(!irqs_disabled());
1544
1545 /*
1546 * don't plug a stopped queue, it must be paired with blk_start_queue()
1547 * which will restart the queueing
1548 */
Coywolf Qi Hunt7daac492006-04-19 10:14:49 +02001549 if (blk_queue_stopped(q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 return;
1551
Jens Axboe2056a782006-03-23 20:00:26 +01001552 if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
Jens Axboe2056a782006-03-23 20:00:26 +01001554 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
1555 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
1557
1558EXPORT_SYMBOL(blk_plug_device);
1559
1560/*
1561 * remove the queue from the plugged list, if present. called with
1562 * queue lock held and interrupts disabled.
1563 */
1564int blk_remove_plug(request_queue_t *q)
1565{
1566 WARN_ON(!irqs_disabled());
1567
1568 if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
1569 return 0;
1570
1571 del_timer(&q->unplug_timer);
1572 return 1;
1573}
1574
1575EXPORT_SYMBOL(blk_remove_plug);
1576
1577/*
1578 * remove the plug and let it rip..
1579 */
1580void __generic_unplug_device(request_queue_t *q)
1581{
Coywolf Qi Hunt7daac492006-04-19 10:14:49 +02001582 if (unlikely(blk_queue_stopped(q)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 return;
1584
1585 if (!blk_remove_plug(q))
1586 return;
1587
Jens Axboe22e2c502005-06-27 10:55:12 +02001588 q->request_fn(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589}
1590EXPORT_SYMBOL(__generic_unplug_device);
1591
1592/**
1593 * generic_unplug_device - fire a request queue
1594 * @q: The &request_queue_t in question
1595 *
1596 * Description:
1597 * Linux uses plugging to build bigger request queues before letting
1598 * the device have at them. If a queue is plugged, the I/O scheduler
1599 * is still adding and merging requests on the queue. Once the queue
1600 * gets unplugged, the request_fn defined for the queue is invoked and
1601 * transfers started.
1602 **/
1603void generic_unplug_device(request_queue_t *q)
1604{
1605 spin_lock_irq(q->queue_lock);
1606 __generic_unplug_device(q);
1607 spin_unlock_irq(q->queue_lock);
1608}
1609EXPORT_SYMBOL(generic_unplug_device);
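/*
 * Illustrative sketch, not part of the original file: a submitter that
 * does not want to wait for the unplug timer can kick the queue
 * explicitly once its bio has been queued.  The helper name and the
 * "bdev"/"bio" arguments are assumptions for the example only.
 *
 *	static void example_submit_and_unplug(struct block_device *bdev,
 *					      struct bio *bio)
 *	{
 *		request_queue_t *q = bdev_get_queue(bdev);
 *
 *		submit_bio(bio->bi_rw, bio);
 *		if (q)
 *			generic_unplug_device(q);
 *	}
 */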
1610
1611static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1612 struct page *page)
1613{
1614 request_queue_t *q = bdi->unplug_io_data;
1615
1616 /*
1617 * devices don't necessarily have an ->unplug_fn defined
1618 */
Jens Axboe2056a782006-03-23 20:00:26 +01001619 if (q->unplug_fn) {
1620 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
1621 q->rq.count[READ] + q->rq.count[WRITE]);
1622
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 q->unplug_fn(q);
Jens Axboe2056a782006-03-23 20:00:26 +01001624 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625}
1626
David Howells65f27f32006-11-22 14:55:48 +00001627static void blk_unplug_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628{
David Howells65f27f32006-11-22 14:55:48 +00001629 request_queue_t *q = container_of(work, request_queue_t, unplug_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Jens Axboe2056a782006-03-23 20:00:26 +01001631 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
1632 q->rq.count[READ] + q->rq.count[WRITE]);
1633
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 q->unplug_fn(q);
1635}
1636
1637static void blk_unplug_timeout(unsigned long data)
1638{
1639 request_queue_t *q = (request_queue_t *)data;
1640
Jens Axboe2056a782006-03-23 20:00:26 +01001641 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
1642 q->rq.count[READ] + q->rq.count[WRITE]);
1643
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 kblockd_schedule_work(&q->unplug_work);
1645}
1646
1647/**
1648 * blk_start_queue - restart a previously stopped queue
1649 * @q: The &request_queue_t in question
1650 *
1651 * Description:
1652 * blk_start_queue() will clear the stop flag on the queue, and call
1653 * the request_fn for the queue if it was in a stopped state when
1654 * entered. Also see blk_stop_queue(). Queue lock must be held.
1655 **/
1656void blk_start_queue(request_queue_t *q)
1657{
Paolo 'Blaisorblade' Giarrussoa038e252006-06-05 12:09:01 +02001658 WARN_ON(!irqs_disabled());
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
1661
1662 /*
1663 * one level of recursion is ok and is much faster than kicking
1664 * the unplug handling
1665 */
1666 if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
1667 q->request_fn(q);
1668 clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
1669 } else {
1670 blk_plug_device(q);
1671 kblockd_schedule_work(&q->unplug_work);
1672 }
1673}
1674
1675EXPORT_SYMBOL(blk_start_queue);
1676
1677/**
1678 * blk_stop_queue - stop a queue
1679 * @q: The &request_queue_t in question
1680 *
1681 * Description:
1682 * The Linux block layer assumes that a block driver will consume all
1683 * entries on the request queue when the request_fn strategy is called.
1684 * Often this will not happen, because of hardware limitations (queue
1685 * depth settings). If a device driver gets a 'queue full' response,
1686 * or if it simply chooses not to queue more I/O at one point, it can
1687 * call this function to prevent the request_fn from being called until
1688 * the driver has signalled it's ready to go again. This happens by calling
1689 * blk_start_queue() to restart queue operations. Queue lock must be held.
1690 **/
1691void blk_stop_queue(request_queue_t *q)
1692{
1693 blk_remove_plug(q);
1694 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
1695}
1696EXPORT_SYMBOL(blk_stop_queue);
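/*
 * Illustrative sketch, not part of the original file: a request_fn that
 * stops the queue when the (hypothetical) hardware signals it is full,
 * leaving the restart to the completion path.  exdrv_hw_full() and
 * exdrv_issue() are made-up driver helpers.
 *
 *	static void exdrv_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (exdrv_hw_full(q->queuedata)) {
 *				blk_stop_queue(q);
 *				break;
 *			}
 *			blkdev_dequeue_request(rq);
 *			exdrv_issue(q->queuedata, rq);
 *		}
 *	}
 *
 * The interrupt handler would later call blk_start_queue() with
 * q->queue_lock held (and interrupts disabled) once the device has
 * drained enough commands.
 */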
1697
1698/**
1699 * blk_sync_queue - cancel any pending callbacks on a queue
1700 * @q: the queue
1701 *
1702 * Description:
1703 * The block layer may perform asynchronous callback activity
1704 * on a queue, such as calling the unplug function after a timeout.
1705 * A block device may call blk_sync_queue to ensure that any
1706 * such activity is cancelled, thus allowing it to release resources
Michael Opdenacker59c51592007-05-09 08:57:56 +02001707 * that the callbacks might use. The caller must already have made sure
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 * that its ->make_request_fn will not re-add plugging prior to calling
1709 * this function.
1710 *
1711 */
1712void blk_sync_queue(struct request_queue *q)
1713{
1714 del_timer_sync(&q->unplug_timer);
1715 kblockd_flush();
1716}
1717EXPORT_SYMBOL(blk_sync_queue);
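/*
 * Illustrative sketch, not part of the original file: a plausible
 * teardown order for a driver-owned queue, quiescing the unplug timer
 * and work before the queue goes away.  "dev" is a hypothetical driver
 * structure holding the gendisk and queue.
 *
 *	del_gendisk(dev->disk);
 *	blk_sync_queue(dev->queue);
 *	blk_cleanup_queue(dev->queue);
 *	put_disk(dev->disk);
 */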
1718
1719/**
1720 * blk_run_queue - run a single device queue
1721 * @q: The queue to run
1722 */
1723void blk_run_queue(struct request_queue *q)
1724{
1725 unsigned long flags;
1726
1727 spin_lock_irqsave(q->queue_lock, flags);
1728 blk_remove_plug(q);
Jens Axboedac07ec2006-05-11 08:20:16 +02001729
1730 /*
1731 * Only recurse once to avoid overrunning the stack, let the unplug
1732 * handling reinvoke the handler shortly if we already got there.
1733 */
1734 if (!elv_queue_empty(q)) {
1735 if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
1736 q->request_fn(q);
1737 clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
1738 } else {
1739 blk_plug_device(q);
1740 kblockd_schedule_work(&q->unplug_work);
1741 }
1742 }
1743
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 spin_unlock_irqrestore(q->queue_lock, flags);
1745}
1746EXPORT_SYMBOL(blk_run_queue);
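/*
 * Illustrative sketch, not part of the original file: after returning
 * an internal resource that had forced it to defer I/O, a driver can
 * restart dispatch with blk_run_queue(), which takes the queue lock
 * itself.  The structure and helper names are made up.
 *
 *	static void exdrv_slot_released(struct exdrv_dev *dev)
 *	{
 *		atomic_inc(&dev->free_slots);
 *		blk_run_queue(dev->queue);
 *	}
 */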
1747
1748/**
1749 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
Martin Waitza5802902006-04-02 13:59:55 +02001750 * @kobj: the kobj belonging to the request queue to be released
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 *
1752 * Description:
1753 * blk_cleanup_queue is the pair to blk_init_queue() or
1754 * blk_queue_make_request(). It should be called when a request queue is
1755 * being released; typically when a block device is being de-registered.
1756 * Currently, its primary task is to free all the &struct request
1757 * structures that were allocated to the queue and the queue itself.
1758 *
1759 * Caveat:
1760 * Hopefully the low level driver will have finished any
1761 * outstanding requests first...
1762 **/
Al Viro483f4af2006-03-18 18:34:37 -05001763static void blk_release_queue(struct kobject *kobj)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764{
Al Viro483f4af2006-03-18 18:34:37 -05001765 request_queue_t *q = container_of(kobj, struct request_queue, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 struct request_list *rl = &q->rq;
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 blk_sync_queue(q);
1769
1770 if (rl->rq_pool)
1771 mempool_destroy(rl->rq_pool);
1772
1773 if (q->queue_tags)
1774 __blk_queue_free_tags(q);
1775
Alexey Dobriyan6c5c9342006-09-29 01:59:40 -07001776 blk_trace_shutdown(q);
Jens Axboe2056a782006-03-23 20:00:26 +01001777
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 kmem_cache_free(requestq_cachep, q);
1779}
1780
Al Viro483f4af2006-03-18 18:34:37 -05001781void blk_put_queue(request_queue_t *q)
1782{
1783 kobject_put(&q->kobj);
1784}
1785EXPORT_SYMBOL(blk_put_queue);
1786
1787void blk_cleanup_queue(request_queue_t * q)
1788{
1789 mutex_lock(&q->sysfs_lock);
1790 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
1791 mutex_unlock(&q->sysfs_lock);
1792
1793 if (q->elevator)
1794 elevator_exit(q->elevator);
1795
1796 blk_put_queue(q);
1797}
1798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799EXPORT_SYMBOL(blk_cleanup_queue);
1800
1801static int blk_init_free_list(request_queue_t *q)
1802{
1803 struct request_list *rl = &q->rq;
1804
1805 rl->count[READ] = rl->count[WRITE] = 0;
1806 rl->starved[READ] = rl->starved[WRITE] = 0;
Tejun Heocb98fc82005-10-28 08:29:39 +02001807 rl->elvpriv = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 init_waitqueue_head(&rl->wait[READ]);
1809 init_waitqueue_head(&rl->wait[WRITE]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
Christoph Lameter19460892005-06-23 00:08:19 -07001811 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
1812 mempool_free_slab, request_cachep, q->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
1814 if (!rl->rq_pool)
1815 return -ENOMEM;
1816
1817 return 0;
1818}
1819
Al Viro8267e262005-10-21 03:20:53 -04001820request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
Christoph Lameter19460892005-06-23 00:08:19 -07001822 return blk_alloc_queue_node(gfp_mask, -1);
1823}
1824EXPORT_SYMBOL(blk_alloc_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
Al Viro483f4af2006-03-18 18:34:37 -05001826static struct kobj_type queue_ktype;
1827
Al Viro8267e262005-10-21 03:20:53 -04001828request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
Christoph Lameter19460892005-06-23 00:08:19 -07001829{
1830 request_queue_t *q;
1831
1832 q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 if (!q)
1834 return NULL;
1835
1836 memset(q, 0, sizeof(*q));
1837 init_timer(&q->unplug_timer);
Al Viro483f4af2006-03-18 18:34:37 -05001838
1839 snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
1840 q->kobj.ktype = &queue_ktype;
1841 kobject_init(&q->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
1843 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
1844 q->backing_dev_info.unplug_io_data = q;
1845
Al Viro483f4af2006-03-18 18:34:37 -05001846 mutex_init(&q->sysfs_lock);
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 return q;
1849}
Christoph Lameter19460892005-06-23 00:08:19 -07001850EXPORT_SYMBOL(blk_alloc_queue_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
1852/**
1853 * blk_init_queue - prepare a request queue for use with a block device
1854 * @rfn: The function to be called to process requests that have been
1855 * placed on the queue.
1856 * @lock: Request queue spin lock
1857 *
1858 * Description:
1859 * If a block device wishes to use the standard request handling procedures,
1860 * which sorts requests and coalesces adjacent requests, then it must
1861 * call blk_init_queue(). The function @rfn will be called when there
1862 * are requests on the queue that need to be processed. If the device
1863 * supports plugging, then @rfn may not be called immediately when requests
1864 * are available on the queue, but may be called at some time later instead.
1865 * Plugged queues are generally unplugged when a buffer belonging to one
1866 * of the requests on the queue is needed, or due to memory pressure.
1867 *
1868 * @rfn is not required, or even expected, to remove all requests off the
1869 * queue, but only as many as it can handle at a time. If it does leave
1870 * requests on the queue, it is responsible for arranging that the requests
1871 * get dealt with eventually.
1872 *
1873 * The queue spin lock must be held while manipulating the requests on the
Paolo 'Blaisorblade' Giarrussoa038e252006-06-05 12:09:01 +02001874 * request queue; this lock will be taken also from interrupt context, so irq
1875 * disabling is needed for it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 *
1877 * Function returns a pointer to the initialized request queue, or NULL if
1878 * it didn't succeed.
1879 *
1880 * Note:
1881 * blk_init_queue() must be paired with a blk_cleanup_queue() call
1882 * when the block device is deactivated (such as at module unload).
1883 **/
Christoph Lameter19460892005-06-23 00:08:19 -07001884
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1886{
Christoph Lameter19460892005-06-23 00:08:19 -07001887 return blk_init_queue_node(rfn, lock, -1);
1888}
1889EXPORT_SYMBOL(blk_init_queue);
1890
1891request_queue_t *
1892blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1893{
1894 request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896 if (!q)
1897 return NULL;
1898
Christoph Lameter19460892005-06-23 00:08:19 -07001899 q->node = node_id;
Al Viro8669aaf2006-03-18 13:50:00 -05001900 if (blk_init_free_list(q)) {
1901 kmem_cache_free(requestq_cachep, q);
1902 return NULL;
1903 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
152587d2005-04-12 16:22:06 -05001905 /*
1906 * if caller didn't supply a lock, they get per-queue locking with
1907 * our embedded lock
1908 */
1909 if (!lock) {
1910 spin_lock_init(&q->__queue_lock);
1911 lock = &q->__queue_lock;
1912 }
1913
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 q->request_fn = rfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 q->prep_rq_fn = NULL;
1916 q->unplug_fn = generic_unplug_device;
1917 q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
1918 q->queue_lock = lock;
1919
1920 blk_queue_segment_boundary(q, 0xffffffff);
1921
1922 blk_queue_make_request(q, __make_request);
1923 blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
1924
1925 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
1926 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
1927
Alan Stern44ec9542007-02-20 11:01:57 -05001928 q->sg_reserved_size = INT_MAX;
1929
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 /*
1931 * all done
1932 */
1933 if (!elevator_init(q, NULL)) {
1934 blk_queue_congestion_threshold(q);
1935 return q;
1936 }
1937
Al Viro8669aaf2006-03-18 13:50:00 -05001938 blk_put_queue(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 return NULL;
1940}
Christoph Lameter19460892005-06-23 00:08:19 -07001941EXPORT_SYMBOL(blk_init_queue_node);
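/*
 * Illustrative sketch, not part of the original file: minimal queue
 * setup for a request-based driver built on blk_init_queue().  The
 * limits are arbitrary example values and exdrv_request_fn() is the
 * hypothetical strategy routine sketched earlier.
 *
 *	static DEFINE_SPINLOCK(exdrv_lock);
 *
 *	static int exdrv_init_queue(struct exdrv_dev *dev)
 *	{
 *		dev->queue = blk_init_queue(exdrv_request_fn, &exdrv_lock);
 *		if (!dev->queue)
 *			return -ENOMEM;
 *
 *		dev->queue->queuedata = dev;
 *		blk_queue_max_sectors(dev->queue, 128);
 *		blk_queue_max_phys_segments(dev->queue, 32);
 *		blk_queue_max_hw_segments(dev->queue, 32);
 *		return 0;
 *	}
 */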
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
1943int blk_get_queue(request_queue_t *q)
1944{
Nick Pigginfde6ad22005-06-23 00:08:53 -07001945 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
Al Viro483f4af2006-03-18 18:34:37 -05001946 kobject_get(&q->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 return 0;
1948 }
1949
1950 return 1;
1951}
1952
1953EXPORT_SYMBOL(blk_get_queue);
1954
1955static inline void blk_free_request(request_queue_t *q, struct request *rq)
1956{
Jens Axboe4aff5e22006-08-10 08:44:47 +02001957 if (rq->cmd_flags & REQ_ELVPRIV)
Tejun Heocb98fc82005-10-28 08:29:39 +02001958 elv_put_request(q, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 mempool_free(rq, q->rq.rq_pool);
1960}
1961
Jens Axboe1ea25ecb2006-07-18 22:24:11 +02001962static struct request *
Jens Axboecb78b282006-07-28 09:32:57 +02001963blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964{
1965 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1966
1967 if (!rq)
1968 return NULL;
1969
1970 /*
Jens Axboe4aff5e22006-08-10 08:44:47 +02001971 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 * see bio.h and blkdev.h
1973 */
Jens Axboe49171e52006-08-10 08:59:11 +02001974 rq->cmd_flags = rw | REQ_ALLOCED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
Tejun Heocb98fc82005-10-28 08:29:39 +02001976 if (priv) {
Jens Axboecb78b282006-07-28 09:32:57 +02001977 if (unlikely(elv_set_request(q, rq, gfp_mask))) {
Tejun Heocb98fc82005-10-28 08:29:39 +02001978 mempool_free(rq, q->rq.rq_pool);
1979 return NULL;
1980 }
Jens Axboe4aff5e22006-08-10 08:44:47 +02001981 rq->cmd_flags |= REQ_ELVPRIV;
Tejun Heocb98fc82005-10-28 08:29:39 +02001982 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Tejun Heocb98fc82005-10-28 08:29:39 +02001984 return rq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985}
1986
1987/*
1988 * ioc_batching returns true if the ioc is a valid batching request and
1989 * should be given priority access to a request.
1990 */
1991static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
1992{
1993 if (!ioc)
1994 return 0;
1995
1996 /*
1997 * Make sure the process is able to allocate at least 1 request
1998 * even if the batch times out, otherwise we could theoretically
1999 * lose wakeups.
2000 */
2001 return ioc->nr_batch_requests == q->nr_batching ||
2002 (ioc->nr_batch_requests > 0
2003 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
2004}
2005
2006/*
2007 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
2008 * will cause the process to be a "batcher" on all queues in the system. This
2009 * is the behaviour we want though - once it gets a wakeup it should be given
2010 * a nice run.
2011 */
Adrian Bunk93d17d32005-06-25 14:59:10 -07002012static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013{
2014 if (!ioc || ioc_batching(q, ioc))
2015 return;
2016
2017 ioc->nr_batch_requests = q->nr_batching;
2018 ioc->last_waited = jiffies;
2019}
2020
2021static void __freed_request(request_queue_t *q, int rw)
2022{
2023 struct request_list *rl = &q->rq;
2024
2025 if (rl->count[rw] < queue_congestion_off_threshold(q))
Thomas Maier79e2de42006-10-19 23:28:15 -07002026 blk_clear_queue_congested(q, rw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
2028 if (rl->count[rw] + 1 <= q->nr_requests) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 if (waitqueue_active(&rl->wait[rw]))
2030 wake_up(&rl->wait[rw]);
2031
2032 blk_clear_queue_full(q, rw);
2033 }
2034}
2035
2036/*
2037 * A request has just been released. Account for it, update the full and
2038 * congestion status, wake up any waiters. Called under q->queue_lock.
2039 */
Tejun Heocb98fc82005-10-28 08:29:39 +02002040static void freed_request(request_queue_t *q, int rw, int priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041{
2042 struct request_list *rl = &q->rq;
2043
2044 rl->count[rw]--;
Tejun Heocb98fc82005-10-28 08:29:39 +02002045 if (priv)
2046 rl->elvpriv--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047
2048 __freed_request(q, rw);
2049
2050 if (unlikely(rl->starved[rw ^ 1]))
2051 __freed_request(q, rw ^ 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052}
2053
2054#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
2055/*
Nick Piggind6344532005-06-28 20:45:14 -07002056 * Get a free request, queue_lock must be held.
2057 * Returns NULL on failure, with queue_lock held.
2058 * Returns !NULL on success, with queue_lock *not held*.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 */
Jens Axboe7749a8d2006-12-13 13:02:26 +01002060static struct request *get_request(request_queue_t *q, int rw_flags,
2061 struct bio *bio, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062{
2063 struct request *rq = NULL;
2064 struct request_list *rl = &q->rq;
Jens Axboe88ee5ef2005-11-12 11:09:12 +01002065 struct io_context *ioc = NULL;
Jens Axboe7749a8d2006-12-13 13:02:26 +01002066 const int rw = rw_flags & 0x01;
Jens Axboe88ee5ef2005-11-12 11:09:12 +01002067 int may_queue, priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Jens Axboe7749a8d2006-12-13 13:02:26 +01002069 may_queue = elv_may_queue(q, rw_flags);
Jens Axboe88ee5ef2005-11-12 11:09:12 +01002070 if (may_queue == ELV_MQUEUE_NO)
2071 goto rq_starved;
2072
2073 if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
2074 if (rl->count[rw]+1 >= q->nr_requests) {
Jens Axboeb5deef92006-07-19 23:39:40 +02002075 ioc = current_io_context(GFP_ATOMIC, q->node);
Jens Axboe88ee5ef2005-11-12 11:09:12 +01002076 /*
2077 * The queue will fill after this allocation, so set
2078 * it as full, and mark this process as "batching".
2079 * This process will be allowed to complete a batch of
2080 * requests, others will be blocked.
2081 */
2082 if (!blk_queue_full(q, rw)) {
2083 ioc_set_batching(q, ioc);
2084 blk_set_queue_full(q, rw);
2085 } else {
2086 if (may_queue != ELV_MQUEUE_MUST
2087 && !ioc_batching(q, ioc)) {
2088 /*
2089 * The queue is full and the allocating
2090 * process is not a "batcher", and not
2091 * exempted by the IO scheduler
2092 */
2093 goto out;
2094 }
2095 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 }
Thomas Maier79e2de42006-10-19 23:28:15 -07002097 blk_set_queue_congested(q, rw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 }
2099
Jens Axboe082cf692005-06-28 16:35:11 +02002100 /*
2101 * Only allow batching queuers to allocate up to 50% over the defined
2102 * limit of requests, otherwise we could have thousands of requests
2103 * allocated with any setting of ->nr_requests
2104 */
Hugh Dickinsfd782a42005-06-29 15:15:40 +01002105 if (rl->count[rw] >= (3 * q->nr_requests / 2))
Jens Axboe082cf692005-06-28 16:35:11 +02002106 goto out;
Hugh Dickinsfd782a42005-06-29 15:15:40 +01002107
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 rl->count[rw]++;
2109 rl->starved[rw] = 0;
Tejun Heocb98fc82005-10-28 08:29:39 +02002110
Jens Axboe64521d12005-10-28 08:30:39 +02002111 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
Tejun Heocb98fc82005-10-28 08:29:39 +02002112 if (priv)
2113 rl->elvpriv++;
2114
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 spin_unlock_irq(q->queue_lock);
2116
Jens Axboe7749a8d2006-12-13 13:02:26 +01002117 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
Jens Axboe88ee5ef2005-11-12 11:09:12 +01002118 if (unlikely(!rq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 /*
2120 * Allocation failed presumably due to memory. Undo anything
2121 * we might have messed up.
2122 *
2123 * Allocating task should really be put onto the front of the
2124 * wait queue, but this is pretty rare.
2125 */
2126 spin_lock_irq(q->queue_lock);
Tejun Heocb98fc82005-10-28 08:29:39 +02002127 freed_request(q, rw, priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129 /*
2130 * in the very unlikely event that allocation failed and no
2131		 * requests for this direction were pending, mark us starved
2132		 * so that freeing of a request in the other direction will
2133		 * notice us. Another possible fix would be to split the
2134 * rq mempool into READ and WRITE
2135 */
2136rq_starved:
2137 if (unlikely(rl->count[rw] == 0))
2138 rl->starved[rw] = 1;
2139
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 goto out;
2141 }
2142
Jens Axboe88ee5ef2005-11-12 11:09:12 +01002143 /*
2144 * ioc may be NULL here, and ioc_batching will be false. That's
2145 * OK, if the queue is under the request limit then requests need
2146 * not count toward the nr_batch_requests limit. There will always
2147 * be some limit enforced by BLK_BATCH_TIME.
2148 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 if (ioc_batching(q, ioc))
2150 ioc->nr_batch_requests--;
2151
2152 rq_init(q, rq);
Jens Axboe2056a782006-03-23 20:00:26 +01002153
2154 blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 return rq;
2157}
2158
2159/*
2160 * No available requests for this queue, unplug the device and wait for some
2161 * requests to become available.
Nick Piggind6344532005-06-28 20:45:14 -07002162 *
2163 * Called with q->queue_lock held, and returns with it unlocked.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 */
Jens Axboe7749a8d2006-12-13 13:02:26 +01002165static struct request *get_request_wait(request_queue_t *q, int rw_flags,
Jens Axboe22e2c502005-06-27 10:55:12 +02002166 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167{
Jens Axboe7749a8d2006-12-13 13:02:26 +01002168 const int rw = rw_flags & 0x01;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 struct request *rq;
2170
Jens Axboe7749a8d2006-12-13 13:02:26 +01002171 rq = get_request(q, rw_flags, bio, GFP_NOIO);
Nick Piggin450991b2005-06-28 20:45:13 -07002172 while (!rq) {
2173 DEFINE_WAIT(wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 struct request_list *rl = &q->rq;
2175
2176 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
2177 TASK_UNINTERRUPTIBLE);
2178
Jens Axboe7749a8d2006-12-13 13:02:26 +01002179 rq = get_request(q, rw_flags, bio, GFP_NOIO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
2181 if (!rq) {
2182 struct io_context *ioc;
2183
Jens Axboe2056a782006-03-23 20:00:26 +01002184 blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
2185
Nick Piggind6344532005-06-28 20:45:14 -07002186 __generic_unplug_device(q);
2187 spin_unlock_irq(q->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 io_schedule();
2189
2190 /*
2191 * After sleeping, we become a "batching" process and
2192 * will be able to allocate at least one request, and
2193			 * up to a big batch of them for a small period of time.
2194 * See ioc_batching, ioc_set_batching
2195 */
Jens Axboeb5deef92006-07-19 23:39:40 +02002196 ioc = current_io_context(GFP_NOIO, q->node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 ioc_set_batching(q, ioc);
Nick Piggind6344532005-06-28 20:45:14 -07002198
2199 spin_lock_irq(q->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 }
2201 finish_wait(&rl->wait[rw], &wait);
Nick Piggin450991b2005-06-28 20:45:13 -07002202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
2204 return rq;
2205}
2206
Al Viro8267e262005-10-21 03:20:53 -04002207struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208{
2209 struct request *rq;
2210
2211 BUG_ON(rw != READ && rw != WRITE);
2212
Nick Piggind6344532005-06-28 20:45:14 -07002213 spin_lock_irq(q->queue_lock);
2214 if (gfp_mask & __GFP_WAIT) {
Jens Axboe22e2c502005-06-27 10:55:12 +02002215 rq = get_request_wait(q, rw, NULL);
Nick Piggind6344532005-06-28 20:45:14 -07002216 } else {
Jens Axboe22e2c502005-06-27 10:55:12 +02002217 rq = get_request(q, rw, NULL, gfp_mask);
Nick Piggind6344532005-06-28 20:45:14 -07002218 if (!rq)
2219 spin_unlock_irq(q->queue_lock);
2220 }
2221 /* q->queue_lock is unlocked at this point */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
2223 return rq;
2224}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225EXPORT_SYMBOL(blk_get_request);
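/*
 * Illustrative sketch, not part of the original file: basic lifetime of
 * a request obtained from blk_get_request().  With a __GFP_WAIT mask
 * the call may sleep but will not return NULL; with GFP_ATOMIC the NULL
 * case must be handled.  exdrv_cmd stands in for whatever private
 * payload the driver attaches.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_SPECIAL;
 *	rq->special = exdrv_cmd;
 *	blk_put_request(rq);
 *
 * In practice the request would be handed to blk_execute_rq() or
 * blk_insert_request() before being released.
 */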
2226
2227/**
Jens Axboedc72ef42006-07-20 14:54:05 +02002228 * blk_start_queueing - initiate dispatch of requests to device
2229 * @q: request queue to kick into gear
2230 *
2231 * This is basically a helper to remove the need to know whether a queue
2232 * is plugged or not if someone just wants to initiate dispatch of requests
2233 * for this queue.
2234 *
2235 * The queue lock must be held with interrupts disabled.
2236 */
2237void blk_start_queueing(request_queue_t *q)
2238{
2239 if (!blk_queue_plugged(q))
2240 q->request_fn(q);
2241 else
2242 __generic_unplug_device(q);
2243}
2244EXPORT_SYMBOL(blk_start_queueing);
2245
2246/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 * blk_requeue_request - put a request back on queue
2248 * @q: request queue where request should be inserted
2249 * @rq: request to be inserted
2250 *
2251 * Description:
2252 * Drivers often keep queueing requests until the hardware cannot accept
2253 * more, when that condition happens we need to put the request back
2254 * on the queue. Must be called with queue lock held.
2255 */
2256void blk_requeue_request(request_queue_t *q, struct request *rq)
2257{
Jens Axboe2056a782006-03-23 20:00:26 +01002258 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
2259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 if (blk_rq_tagged(rq))
2261 blk_queue_end_tag(q, rq);
2262
2263 elv_requeue_request(q, rq);
2264}
2265
2266EXPORT_SYMBOL(blk_requeue_request);
2267
2268/**
2269 * blk_insert_request - insert a special request in to a request queue
2270 * @q: request queue where request should be inserted
2271 * @rq: request to be inserted
2272 * @at_head: insert request at head or tail of queue
2273 * @data: private data
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 *
2275 * Description:
2276 * Many block devices need to execute commands asynchronously, so they don't
2277 * block the whole kernel from preemption during request execution. This is
2278 * accomplished normally by inserting artificial requests tagged as
2279 * REQ_SPECIAL in to the corresponding request queue, and letting them be
2280 * scheduled for actual execution by the request queue.
2281 *
2282 * We have the option of inserting the head or the tail of the queue.
2283 * Typically we use the tail for new ioctls and so forth. We use the head
2284 * of the queue for things like a QUEUE_FULL message from a device, or a
2285 * host that is unable to accept a particular command.
2286 */
2287void blk_insert_request(request_queue_t *q, struct request *rq,
Tejun Heo 867d1192005-04-24 02:06:05 -05002288 int at_head, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289{
Tejun Heo 867d1192005-04-24 02:06:05 -05002290 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 unsigned long flags;
2292
2293 /*
2294 * tell I/O scheduler that this isn't a regular read/write (ie it
2295 * must not attempt merges on this) and that it acts as a soft
2296 * barrier
2297 */
Jens Axboe4aff5e22006-08-10 08:44:47 +02002298 rq->cmd_type = REQ_TYPE_SPECIAL;
2299 rq->cmd_flags |= REQ_SOFTBARRIER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
2301 rq->special = data;
2302
2303 spin_lock_irqsave(q->queue_lock, flags);
2304
2305 /*
2306 * If command is tagged, release the tag
2307 */
Tejun Heo 867d1192005-04-24 02:06:05 -05002308 if (blk_rq_tagged(rq))
2309 blk_queue_end_tag(q, rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
Tejun Heo 867d1192005-04-24 02:06:05 -05002311 drive_stat_acct(rq, rq->nr_sectors, 1);
2312 __elv_add_request(q, rq, where, 0);
Jens Axboedc72ef42006-07-20 14:54:05 +02002313 blk_start_queueing(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 spin_unlock_irqrestore(q->queue_lock, flags);
2315}
2316
2317EXPORT_SYMBOL(blk_insert_request);
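/*
 * Illustrative sketch, not part of the original file: queueing a
 * driver-private command at the head of the queue, for example in
 * response to a device condition.  exdrv_sense_data is a made-up
 * cookie that ends up in rq->special.
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_ATOMIC);
 *
 *	if (rq)
 *		blk_insert_request(q, rq, 1, exdrv_sense_data);
 */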
2318
Mike Christie0e75f902006-12-01 10:40:55 +01002319static int __blk_rq_unmap_user(struct bio *bio)
2320{
2321 int ret = 0;
2322
2323 if (bio) {
2324 if (bio_flagged(bio, BIO_USER_MAPPED))
2325 bio_unmap_user(bio);
2326 else
2327 ret = bio_uncopy_user(bio);
2328 }
2329
2330 return ret;
2331}
2332
2333static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
2334 void __user *ubuf, unsigned int len)
2335{
2336 unsigned long uaddr;
2337 struct bio *bio, *orig_bio;
2338 int reading, ret;
2339
2340 reading = rq_data_dir(rq) == READ;
2341
2342 /*
2343 * if alignment requirement is satisfied, map in user pages for
2344 * direct dma. else, set up kernel bounce buffers
2345 */
2346 uaddr = (unsigned long) ubuf;
2347 if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
2348 bio = bio_map_user(q, NULL, uaddr, len, reading);
2349 else
2350 bio = bio_copy_user(q, uaddr, len, reading);
2351
Jens Axboe29852592006-12-19 08:27:31 +01002352 if (IS_ERR(bio))
Mike Christie0e75f902006-12-01 10:40:55 +01002353 return PTR_ERR(bio);
Mike Christie0e75f902006-12-01 10:40:55 +01002354
2355 orig_bio = bio;
2356 blk_queue_bounce(q, &bio);
Jens Axboe29852592006-12-19 08:27:31 +01002357
Mike Christie0e75f902006-12-01 10:40:55 +01002358 /*
2359 * We link the bounce buffer in and could have to traverse it
2360 * later so we have to get a ref to prevent it from being freed
2361 */
2362 bio_get(bio);
2363
Mike Christie0e75f902006-12-01 10:40:55 +01002364 if (!rq->bio)
2365 blk_rq_bio_prep(q, rq, bio);
Jens Axboe1aa4f242006-12-19 08:33:11 +01002366 else if (!ll_back_merge_fn(q, rq, bio)) {
Mike Christie0e75f902006-12-01 10:40:55 +01002367 ret = -EINVAL;
Mike Christie0e75f902006-12-01 10:40:55 +01002368 goto unmap_bio;
2369 } else {
2370 rq->biotail->bi_next = bio;
2371 rq->biotail = bio;
2372
Mike Christie0e75f902006-12-01 10:40:55 +01002373 rq->data_len += bio->bi_size;
2374 }
Mike Christie0e75f902006-12-01 10:40:55 +01002375
2376 return bio->bi_size;
2377
2378unmap_bio:
2379	/* if it was bounced we must call the end io function */
2380 bio_endio(bio, bio->bi_size, 0);
2381 __blk_rq_unmap_user(orig_bio);
2382 bio_put(bio);
2383 return ret;
2384}
2385
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386/**
2387 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
2388 * @q: request queue where request should be inserted
Christoph Hellwig 73747ae2005-06-20 14:21:01 +02002389 * @rq: request structure to fill
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 * @ubuf: the user buffer
2391 * @len: length of user data
2392 *
2393 * Description:
2394 * Data will be mapped directly for zero copy io, if possible. Otherwise
2395 * a kernel bounce buffer is used.
2396 *
2397 * A matching blk_rq_unmap_user() must be issued at the end of io, while
2398 * still in process context.
2399 *
2400 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
2401 * before being submitted to the device, as pages mapped may be out of
2402 * reach. It's the caller's responsibility to make sure this happens. The
2403 * original bio must be passed back in to blk_rq_unmap_user() for proper
2404 * unmapping.
2405 */
Jens Axboedd1cab92005-06-20 14:06:01 +02002406int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
Mike Christie0e75f902006-12-01 10:40:55 +01002407 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408{
Mike Christie0e75f902006-12-01 10:40:55 +01002409 unsigned long bytes_read = 0;
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002410 struct bio *bio = NULL;
Mike Christie0e75f902006-12-01 10:40:55 +01002411 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412
Mike Christiedefd94b2005-12-05 02:37:06 -06002413 if (len > (q->max_hw_sectors << 9))
Jens Axboedd1cab92005-06-20 14:06:01 +02002414 return -EINVAL;
2415 if (!len || !ubuf)
2416 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417
Mike Christie0e75f902006-12-01 10:40:55 +01002418 while (bytes_read != len) {
2419 unsigned long map_len, end, start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
Mike Christie0e75f902006-12-01 10:40:55 +01002421 map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
2422 end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
2423 >> PAGE_SHIFT;
2424 start = (unsigned long)ubuf >> PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425
Mike Christie0e75f902006-12-01 10:40:55 +01002426 /*
2427 * A bad offset could cause us to require BIO_MAX_PAGES + 1
2428 * pages. If this happens we just lower the requested
2429 * mapping len by a page so that we can fit
2430 */
2431 if (end - start > BIO_MAX_PAGES)
2432 map_len -= PAGE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
Mike Christie0e75f902006-12-01 10:40:55 +01002434 ret = __blk_rq_map_user(q, rq, ubuf, map_len);
2435 if (ret < 0)
2436 goto unmap_rq;
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002437 if (!bio)
2438 bio = rq->bio;
Mike Christie0e75f902006-12-01 10:40:55 +01002439 bytes_read += ret;
2440 ubuf += ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 }
2442
Mike Christie0e75f902006-12-01 10:40:55 +01002443 rq->buffer = rq->data = NULL;
2444 return 0;
2445unmap_rq:
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002446 blk_rq_unmap_user(bio);
Mike Christie0e75f902006-12-01 10:40:55 +01002447 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448}
2449
2450EXPORT_SYMBOL(blk_rq_map_user);
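/*
 * Illustrative sketch, not part of the original file: the SG_IO-style
 * pattern of mapping a user buffer, executing the request and
 * unmapping again.  Command bytes, sense handling and error paths are
 * omitted; "ubuf", "len" and "disk" are assumed inputs.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	struct bio *bio;
 *	int ret;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_user(q, rq, ubuf, len);
 *	if (!ret) {
 *		bio = rq->bio;
 *		blk_execute_rq(q, disk, rq, 0);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_put_request(rq);
 *
 * Note that rq->bio is saved before execution, since completion may
 * change it and blk_rq_unmap_user() wants the original head of the
 * bio list.
 */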
2451
2452/**
James Bottomley f1970ba2005-06-20 14:06:52 +02002453 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
2454 * @q: request queue where request should be inserted
2455 * @rq: request to map data to
2456 * @iov: pointer to the iovec
2457 * @iov_count: number of elements in the iovec
Randy Dunlapaf9997e2006-12-22 01:06:52 -08002458 * @len: I/O byte count
James Bottomley f1970ba2005-06-20 14:06:52 +02002459 *
2460 * Description:
2461 * Data will be mapped directly for zero copy io, if possible. Otherwise
2462 * a kernel bounce buffer is used.
2463 *
2464 * A matching blk_rq_unmap_user() must be issued at the end of io, while
2465 * still in process context.
2466 *
2467 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
2468 * before being submitted to the device, as pages mapped may be out of
2469 * reach. It's the caller's responsibility to make sure this happens. The
2470 * original bio must be passed back in to blk_rq_unmap_user() for proper
2471 * unmapping.
2472 */
2473int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
Mike Christie0e75f902006-12-01 10:40:55 +01002474 struct sg_iovec *iov, int iov_count, unsigned int len)
James Bottomley f1970ba2005-06-20 14:06:52 +02002475{
2476 struct bio *bio;
2477
2478 if (!iov || iov_count <= 0)
2479 return -EINVAL;
2480
2481 /* we don't allow misaligned data like bio_map_user() does. If the
2482 * user is using sg, they're expected to know the alignment constraints
2483 * and respect them accordingly */
2484 bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
2485 if (IS_ERR(bio))
2486 return PTR_ERR(bio);
2487
Mike Christie0e75f902006-12-01 10:40:55 +01002488 if (bio->bi_size != len) {
2489 bio_endio(bio, bio->bi_size, 0);
2490 bio_unmap_user(bio);
2491 return -EINVAL;
2492 }
2493
2494 bio_get(bio);
James Bottomley f1970ba2005-06-20 14:06:52 +02002495 blk_rq_bio_prep(q, rq, bio);
2496 rq->buffer = rq->data = NULL;
James Bottomley f1970ba2005-06-20 14:06:52 +02002497 return 0;
2498}
2499
2500EXPORT_SYMBOL(blk_rq_map_user_iov);
2501
2502/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 * blk_rq_unmap_user - unmap a request with user data
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002504 * @bio: start of bio list
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 *
2506 * Description:
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002507 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
2508 * supply the original rq->bio from the blk_rq_map_user() return, since
2509 * the io completion may have changed rq->bio.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 */
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002511int blk_rq_unmap_user(struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512{
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002513 struct bio *mapped_bio;
Jens Axboe48785bb2006-12-19 11:07:59 +01002514 int ret = 0, ret2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002516 while (bio) {
2517 mapped_bio = bio;
2518 if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
Mike Christie0e75f902006-12-01 10:40:55 +01002519 mapped_bio = bio->bi_private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520
Jens Axboe48785bb2006-12-19 11:07:59 +01002521 ret2 = __blk_rq_unmap_user(mapped_bio);
2522 if (ret2 && !ret)
2523 ret = ret2;
2524
Jens Axboe8e5cfc42006-12-19 11:12:46 +01002525 mapped_bio = bio;
2526 bio = bio->bi_next;
2527 bio_put(mapped_bio);
Mike Christie0e75f902006-12-01 10:40:55 +01002528 }
Jens Axboe48785bb2006-12-19 11:07:59 +01002529
2530 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531}
2532
2533EXPORT_SYMBOL(blk_rq_unmap_user);
2534
2535/**
Mike Christie df46b9a2005-06-20 14:04:44 +02002536 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
2537 * @q: request queue where request should be inserted
Christoph Hellwig 73747ae2005-06-20 14:21:01 +02002538 * @rq: request to fill
Mike Christie df46b9a2005-06-20 14:04:44 +02002539 * @kbuf: the kernel buffer
2540 * @len: length of kernel data
Christoph Hellwig 73747ae2005-06-20 14:21:01 +02002541 * @gfp_mask: memory allocation flags
Mike Christie df46b9a2005-06-20 14:04:44 +02002542 */
Jens Axboedd1cab92005-06-20 14:06:01 +02002543int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
Al Viro8267e262005-10-21 03:20:53 -04002544 unsigned int len, gfp_t gfp_mask)
Mike Christie df46b9a2005-06-20 14:04:44 +02002545{
Mike Christie df46b9a2005-06-20 14:04:44 +02002546 struct bio *bio;
2547
Mike Christiedefd94b2005-12-05 02:37:06 -06002548 if (len > (q->max_hw_sectors << 9))
Jens Axboedd1cab92005-06-20 14:06:01 +02002549 return -EINVAL;
2550 if (!len || !kbuf)
2551 return -EINVAL;
Mike Christie df46b9a2005-06-20 14:04:44 +02002552
2553 bio = bio_map_kern(q, kbuf, len, gfp_mask);
Jens Axboedd1cab92005-06-20 14:06:01 +02002554 if (IS_ERR(bio))
2555 return PTR_ERR(bio);
Mike Christie df46b9a2005-06-20 14:04:44 +02002556
Jens Axboedd1cab92005-06-20 14:06:01 +02002557 if (rq_data_dir(rq) == WRITE)
2558 bio->bi_rw |= (1 << BIO_RW);
Mike Christie df46b9a2005-06-20 14:04:44 +02002559
Jens Axboedd1cab92005-06-20 14:06:01 +02002560 blk_rq_bio_prep(q, rq, bio);
Mike Christie821de3a2007-05-08 19:12:23 +02002561 blk_queue_bounce(q, &rq->bio);
Jens Axboedd1cab92005-06-20 14:06:01 +02002562 rq->buffer = rq->data = NULL;
Jens Axboedd1cab92005-06-20 14:06:01 +02002563 return 0;
Mike Christie df46b9a2005-06-20 14:04:44 +02002564}
2565
2566EXPORT_SYMBOL(blk_rq_map_kern);
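/*
 * Illustrative sketch, not part of the original file: the same packet
 * command pattern with a kernel buffer.  The timeout and the "buffer"
 * and "disk" arguments are placeholders.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->timeout = 60 * HZ;
 *	if (!blk_rq_map_kern(q, rq, buffer, len, GFP_KERNEL))
 *		blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */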
2567
Christoph Hellwig 73747ae2005-06-20 14:21:01 +02002568/**
2569 * blk_execute_rq_nowait - insert a request into queue for execution
2570 * @q: queue to insert the request in
2571 * @bd_disk: matching gendisk
2572 * @rq: request to insert
2573 * @at_head: insert request at head or tail of queue
2574 * @done: I/O completion handler
2575 *
2576 * Description:
2577 * Insert a fully prepared request at the back of the io scheduler queue
2578 * for execution. Don't wait for completion.
2579 */
James Bottomley f1970ba2005-06-20 14:06:52 +02002580void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
2581 struct request *rq, int at_head,
Tejun Heo8ffdc652006-01-06 09:49:03 +01002582 rq_end_io_fn *done)
James Bottomley f1970ba2005-06-20 14:06:52 +02002583{
2584 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
2585
2586 rq->rq_disk = bd_disk;
Jens Axboe4aff5e22006-08-10 08:44:47 +02002587 rq->cmd_flags |= REQ_NOMERGE;
James Bottomley f1970ba2005-06-20 14:06:52 +02002588 rq->end_io = done;
Andrew Morton4c5d0bb2006-03-22 08:08:01 +01002589 WARN_ON(irqs_disabled());
2590 spin_lock_irq(q->queue_lock);
2591 __elv_add_request(q, rq, where, 1);
2592 __generic_unplug_device(q);
2593 spin_unlock_irq(q->queue_lock);
James Bottomley f1970ba2005-06-20 14:06:52 +02002594}
Mike Christie6e39b69e2005-11-11 05:30:24 -06002595EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
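/*
 * Illustrative sketch, not part of the original file: asynchronous
 * submission with a private completion handler.  exdrv_cmd_done() is a
 * hypothetical rq_end_io_fn; here it simply wakes a waiter stashed in
 * rq->end_io_data, much like blk_end_sync_rq() below.
 *
 *	static void exdrv_cmd_done(struct request *rq, int error)
 *	{
 *		complete(rq->end_io_data);
 *	}
 *
 *	rq->end_io_data = &done;
 *	blk_execute_rq_nowait(q, disk, rq, 0, exdrv_cmd_done);
 */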
2596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597/**
2598 * blk_execute_rq - insert a request into queue for execution
2599 * @q: queue to insert the request in
2600 * @bd_disk: matching gendisk
2601 * @rq: request to insert
James Bottomley 994ca9a2005-06-20 14:11:09 +02002602 * @at_head: insert request at head or tail of queue
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 *
2604 * Description:
2605 * Insert a fully prepared request at the back of the io scheduler queue
Christoph Hellwig 73747ae2005-06-20 14:21:01 +02002606 * for execution and wait for completion.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 */
2608int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
James Bottomley 994ca9a2005-06-20 14:11:09 +02002609 struct request *rq, int at_head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610{
Ingo Molnar60be6b92006-07-03 00:25:26 -07002611 DECLARE_COMPLETION_ONSTACK(wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 char sense[SCSI_SENSE_BUFFERSIZE];
2613 int err = 0;
2614
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 /*
2616 * we need an extra reference to the request, so we can look at
2617 * it after io completion
2618 */
2619 rq->ref_count++;
2620
2621 if (!rq->sense) {
2622 memset(sense, 0, sizeof(sense));
2623 rq->sense = sense;
2624 rq->sense_len = 0;
2625 }
2626
Jens Axboec00895a2006-09-30 20:29:12 +02002627 rq->end_io_data = &wait;
James Bottomley 994ca9a2005-06-20 14:11:09 +02002628 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 wait_for_completion(&wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630
2631 if (rq->errors)
2632 err = -EIO;
2633
2634 return err;
2635}
2636
2637EXPORT_SYMBOL(blk_execute_rq);
2638
2639/**
2640 * blkdev_issue_flush - queue a flush
2641 * @bdev: blockdev to issue flush for
2642 * @error_sector: error sector
2643 *
2644 * Description:
2645 * Issue a flush for the block device in question. Caller can supply
2646 * room for storing the error offset in case of a flush error, if they
2647 * wish to. Caller must run wait_for_completion() on its own.
2648 */
2649int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2650{
2651 request_queue_t *q;
2652
2653 if (bdev->bd_disk == NULL)
2654 return -ENXIO;
2655
2656 q = bdev_get_queue(bdev);
2657 if (!q)
2658 return -ENXIO;
2659 if (!q->issue_flush_fn)
2660 return -EOPNOTSUPP;
2661
2662 return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
2663}
2664
2665EXPORT_SYMBOL(blkdev_issue_flush);
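/*
 * Illustrative sketch, not part of the original file: a filesystem
 * forcing the device write cache out after metadata writes.  Treating
 * -EOPNOTSUPP as success is the usual convention, since it only means
 * the queue has no issue_flush_fn.
 *
 *	sector_t error_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(sb->s_bdev, &error_sector);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;
 */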
2666
Adrian Bunk93d17d32005-06-25 14:59:10 -07002667static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668{
2669 int rw = rq_data_dir(rq);
2670
2671 if (!blk_fs_request(rq) || !rq->rq_disk)
2672 return;
2673
Jens Axboed72d9042005-11-01 08:35:42 +01002674 if (!new_io) {
Jens Axboea3623572005-11-01 09:26:16 +01002675 __disk_stat_inc(rq->rq_disk, merges[rw]);
Jens Axboed72d9042005-11-01 08:35:42 +01002676 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 disk_round_stats(rq->rq_disk);
2678 rq->rq_disk->in_flight++;
2679 }
2680}
2681
2682/*
2683 * add-request adds a request to the linked list.
2684 * queue lock is held and interrupts disabled, as we muck with the
2685 * request queue list.
2686 */
2687static inline void add_request(request_queue_t * q, struct request * req)
2688{
2689 drive_stat_acct(req, req->nr_sectors, 1);
2690
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 /*
2692 * elevator indicated where it wants this request to be
2693 * inserted at elevator_merge time
2694 */
2695 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
2696}
2697
2698/*
2699 * disk_round_stats() - Round off the performance stats on a struct
2700 * disk_stats.
2701 *
2702 * The average IO queue length and utilisation statistics are maintained
2703 * by observing the current state of the queue length and the amount of
2704 * time it has been in this state for.
2705 *
2706 * Normally, that accounting is done on IO completion, but that can result
2707 * in more than a second's worth of IO being accounted for within any one
2708 * second, leading to >100% utilisation. To deal with that, we call this
2709 * function to do a round-off before returning the results when reading
2710 * /proc/diskstats. This accounts immediately for all queue usage up to
2711 * the current jiffies and restarts the counters again.
2712 */
2713void disk_round_stats(struct gendisk *disk)
2714{
2715 unsigned long now = jiffies;
2716
Chen, Kenneth Wb2982642005-10-13 21:49:29 +02002717 if (now == disk->stamp)
2718 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719
Chen, Kenneth W20e5c812005-10-13 21:48:42 +02002720 if (disk->in_flight) {
2721 __disk_stat_add(disk, time_in_queue,
2722 disk->in_flight * (now - disk->stamp));
2723 __disk_stat_add(disk, io_ticks, (now - disk->stamp));
2724 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 disk->stamp = now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726}
2727
Jun'ichi "Nick" Nomura3eaf8402006-02-01 03:04:53 -08002728EXPORT_SYMBOL_GPL(disk_round_stats);
2729
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730/*
2731 * queue lock must be held
2732 */
Mike Christie6e39b69e2005-11-11 05:30:24 -06002733void __blk_put_request(request_queue_t *q, struct request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 if (unlikely(!q))
2736 return;
2737 if (unlikely(--req->ref_count))
2738 return;
2739
Tejun Heo8922e162005-10-20 16:23:44 +02002740 elv_completed_request(q, req);
2741
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 /*
2743 * Request may not have originated from ll_rw_blk. if not,
2744 * it didn't come out of our reserved rq pools
2745 */
Jens Axboe49171e52006-08-10 08:59:11 +02002746 if (req->cmd_flags & REQ_ALLOCED) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 int rw = rq_data_dir(req);
Jens Axboe4aff5e22006-08-10 08:44:47 +02002748 int priv = req->cmd_flags & REQ_ELVPRIV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 BUG_ON(!list_empty(&req->queuelist));
Jens Axboe98170642006-07-28 09:23:08 +02002751 BUG_ON(!hlist_unhashed(&req->hash));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
2753 blk_free_request(q, req);
Tejun Heocb98fc82005-10-28 08:29:39 +02002754 freed_request(q, rw, priv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 }
2756}
2757
Mike Christie6e39b69e2005-11-11 05:30:24 -06002758EXPORT_SYMBOL_GPL(__blk_put_request);
2759
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760void blk_put_request(struct request *req)
2761{
Tejun Heo8922e162005-10-20 16:23:44 +02002762 unsigned long flags;
2763 request_queue_t *q = req->q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
Tejun Heo8922e162005-10-20 16:23:44 +02002765 /*
2766 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
2767 * following if (q) test.
2768 */
2769 if (q) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 spin_lock_irqsave(q->queue_lock, flags);
2771 __blk_put_request(q, req);
2772 spin_unlock_irqrestore(q->queue_lock, flags);
2773 }
2774}
2775
2776EXPORT_SYMBOL(blk_put_request);
2777
2778/**
2779 * blk_end_sync_rq - executes a completion event on a request
2780 * @rq: request to complete
Jens Axboefddfdea2006-01-31 15:24:34 +01002781 * @error: end io status of the request
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 */
Tejun Heo8ffdc652006-01-06 09:49:03 +01002783void blk_end_sync_rq(struct request *rq, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784{
Jens Axboec00895a2006-09-30 20:29:12 +02002785 struct completion *waiting = rq->end_io_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786
Jens Axboec00895a2006-09-30 20:29:12 +02002787 rq->end_io_data = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 __blk_put_request(rq->q, rq);
2789
2790 /*
2791 * complete last, if this is a stack request the process (and thus
2792 * the rq pointer) could be invalid right after this complete()
2793 */
2794 complete(waiting);
2795}
2796EXPORT_SYMBOL(blk_end_sync_rq);
2797
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798/*
2799 * Has to be called with the request spinlock acquired
2800 */
2801static int attempt_merge(request_queue_t *q, struct request *req,
2802 struct request *next)
2803{
2804 if (!rq_mergeable(req) || !rq_mergeable(next))
2805 return 0;
2806
2807 /*
Andreas Mohrd6e05ed2006-06-26 18:35:02 +02002808 * not contiguous
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 */
2810 if (req->sector + req->nr_sectors != next->sector)
2811 return 0;
2812
2813 if (rq_data_dir(req) != rq_data_dir(next)
2814 || req->rq_disk != next->rq_disk
Jens Axboec00895a2006-09-30 20:29:12 +02002815 || next->special)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 return 0;
2817
2818 /*
2819 * If we are allowed to merge, then append bio list
2820 * from next to rq and release next. merge_requests_fn
2821 * will have updated segment counts, update sector
2822 * counts here.
2823 */
Jens Axboe1aa4f242006-12-19 08:33:11 +01002824 if (!ll_merge_requests_fn(q, req, next))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 return 0;
2826
2827 /*
2828 * At this point we have either done a back merge
2829 * or front merge. We need the smaller start_time of
2830 * the merged requests to be the current request
2831 * for accounting purposes.
2832 */
2833 if (time_after(req->start_time, next->start_time))
2834 req->start_time = next->start_time;
2835
2836 req->biotail->bi_next = next->bio;
2837 req->biotail = next->biotail;
2838
2839 req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
2840
2841 elv_merge_requests(q, req, next);
2842
2843 if (req->rq_disk) {
2844 disk_round_stats(req->rq_disk);
2845 req->rq_disk->in_flight--;
2846 }
2847
Jens Axboe22e2c502005-06-27 10:55:12 +02002848 req->ioprio = ioprio_best(req->ioprio, next->ioprio);
2849
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 __blk_put_request(q, next);
2851 return 1;
2852}
2853
2854static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
2855{
2856 struct request *next = elv_latter_request(q, rq);
2857
2858 if (next)
2859 return attempt_merge(q, rq, next);
2860
2861 return 0;
2862}
2863
2864static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
2865{
2866 struct request *prev = elv_former_request(q, rq);
2867
2868 if (prev)
2869 return attempt_merge(q, prev, rq);
2870
2871 return 0;
2872}
2873
Tejun Heo52d9e672006-01-06 09:49:58 +01002874static void init_request_from_bio(struct request *req, struct bio *bio)
2875{
Jens Axboe4aff5e22006-08-10 08:44:47 +02002876 req->cmd_type = REQ_TYPE_FS;
Tejun Heo52d9e672006-01-06 09:49:58 +01002877
2878 /*
2879 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
2880 */
2881 if (bio_rw_ahead(bio) || bio_failfast(bio))
Jens Axboe4aff5e22006-08-10 08:44:47 +02002882 req->cmd_flags |= REQ_FAILFAST;
Tejun Heo52d9e672006-01-06 09:49:58 +01002883
2884 /*
2885	 * REQ_BARRIER implies no merging, but let's make it explicit
2886 */
2887 if (unlikely(bio_barrier(bio)))
Jens Axboe4aff5e22006-08-10 08:44:47 +02002888 req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
Tejun Heo52d9e672006-01-06 09:49:58 +01002889
Jens Axboeb31dc662006-06-13 08:26:10 +02002890 if (bio_sync(bio))
Jens Axboe4aff5e22006-08-10 08:44:47 +02002891 req->cmd_flags |= REQ_RW_SYNC;
Jens Axboe5404bc72006-08-10 09:01:02 +02002892 if (bio_rw_meta(bio))
2893 req->cmd_flags |= REQ_RW_META;
Jens Axboeb31dc662006-06-13 08:26:10 +02002894
Tejun Heo52d9e672006-01-06 09:49:58 +01002895 req->errors = 0;
2896 req->hard_sector = req->sector = bio->bi_sector;
2897 req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
2898 req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
2899 req->nr_phys_segments = bio_phys_segments(req->q, bio);
2900 req->nr_hw_segments = bio_hw_segments(req->q, bio);
2901 req->buffer = bio_data(bio); /* see ->buffer comment above */
Tejun Heo52d9e672006-01-06 09:49:58 +01002902 req->bio = req->biotail = bio;
2903 req->ioprio = bio_prio(bio);
2904 req->rq_disk = bio->bi_bdev->bd_disk;
2905 req->start_time = jiffies;
2906}
2907
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908static int __make_request(request_queue_t *q, struct bio *bio)
2909{
Nick Piggin450991b2005-06-28 20:45:13 -07002910 struct request *req;
Jens Axboe51da90f2006-07-18 04:14:45 +02002911 int el_ret, nr_sectors, barrier, err;
2912 const unsigned short prio = bio_prio(bio);
2913 const int sync = bio_sync(bio);
Jens Axboe7749a8d2006-12-13 13:02:26 +01002914 int rw_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 nr_sectors = bio_sectors(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917
2918 /*
2919 * low level driver can indicate that it wants pages above a
2920 * certain limit bounced to low memory (ie for highmem, or even
2921 * ISA dma in theory)
2922 */
2923 blk_queue_bounce(q, &bio);
2924
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 barrier = bio_barrier(bio);
Tejun Heo797e7db2006-01-06 09:51:03 +01002926 if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 err = -EOPNOTSUPP;
2928 goto end_io;
2929 }
2930
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 spin_lock_irq(q->queue_lock);
2932
Nick Piggin450991b2005-06-28 20:45:13 -07002933 if (unlikely(barrier) || elv_queue_empty(q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 goto get_rq;
2935
2936 el_ret = elv_merge(q, &req, bio);
2937 switch (el_ret) {
2938 case ELEVATOR_BACK_MERGE:
2939 BUG_ON(!rq_mergeable(req));
2940
Jens Axboe1aa4f242006-12-19 08:33:11 +01002941 if (!ll_back_merge_fn(q, req, bio))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942 break;
2943
Jens Axboe2056a782006-03-23 20:00:26 +01002944 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
2945
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 req->biotail->bi_next = bio;
2947 req->biotail = bio;
2948 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
Jens Axboe22e2c502005-06-27 10:55:12 +02002949 req->ioprio = ioprio_best(req->ioprio, prio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 drive_stat_acct(req, nr_sectors, 0);
2951 if (!attempt_back_merge(q, req))
Jens Axboe2e662b62006-07-13 11:55:04 +02002952 elv_merged_request(q, req, el_ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 goto out;
2954
2955 case ELEVATOR_FRONT_MERGE:
2956 BUG_ON(!rq_mergeable(req));
2957
Jens Axboe1aa4f242006-12-19 08:33:11 +01002958 if (!ll_front_merge_fn(q, req, bio))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 break;
2960
Jens Axboe2056a782006-03-23 20:00:26 +01002961 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
2962
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 bio->bi_next = req->bio;
2964 req->bio = bio;
2965
2966 /*
2967		 * req->buffer may not be valid: if the low level driver said
2968		 * it didn't need a bounce buffer then it had better
2969		 * not touch req->buffer either...
2970 */
2971 req->buffer = bio_data(bio);
Jens Axboe51da90f2006-07-18 04:14:45 +02002972 req->current_nr_sectors = bio_cur_sectors(bio);
2973 req->hard_cur_sectors = req->current_nr_sectors;
2974 req->sector = req->hard_sector = bio->bi_sector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
Jens Axboe22e2c502005-06-27 10:55:12 +02002976 req->ioprio = ioprio_best(req->ioprio, prio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977 drive_stat_acct(req, nr_sectors, 0);
2978 if (!attempt_front_merge(q, req))
Jens Axboe2e662b62006-07-13 11:55:04 +02002979 elv_merged_request(q, req, el_ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 goto out;
2981
Nick Piggin450991b2005-06-28 20:45:13 -07002982		/* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 default:
Nick Piggin450991b2005-06-28 20:45:13 -07002984 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 }
2986
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987get_rq:
Nick Piggin450991b2005-06-28 20:45:13 -07002988 /*
Jens Axboe7749a8d2006-12-13 13:02:26 +01002989 * This sync check and mask will be re-done in init_request_from_bio(),
2990 * but we need to set it earlier to expose the sync flag to the
2991 * rq allocator and io schedulers.
2992 */
2993 rw_flags = bio_data_dir(bio);
2994 if (sync)
2995 rw_flags |= REQ_RW_SYNC;
2996
2997 /*
Nick Piggin450991b2005-06-28 20:45:13 -07002998	 * Grab a free request. This may sleep but cannot fail.
Nick Piggind6344532005-06-28 20:45:14 -07002999 * Returns with the queue unlocked.
Nick Piggin450991b2005-06-28 20:45:13 -07003000 */
Jens Axboe7749a8d2006-12-13 13:02:26 +01003001 req = get_request_wait(q, rw_flags, bio);
Nick Piggind6344532005-06-28 20:45:14 -07003002
Nick Piggin450991b2005-06-28 20:45:13 -07003003 /*
3004 * After dropping the lock and possibly sleeping here, our request
3005 * may now be mergeable after it had proven unmergeable (above).
3006 * We don't worry about that case for efficiency. It won't happen
3007 * often, and the elevators are able to handle it.
3008 */
Tejun Heo52d9e672006-01-06 09:49:58 +01003009 init_request_from_bio(req, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
Nick Piggin450991b2005-06-28 20:45:13 -07003011 spin_lock_irq(q->queue_lock);
3012 if (elv_queue_empty(q))
3013 blk_plug_device(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 add_request(q, req);
3015out:
Jens Axboe4a534f92005-04-16 15:25:40 -07003016 if (sync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 __generic_unplug_device(q);
3018
3019 spin_unlock_irq(q->queue_lock);
3020 return 0;
3021
3022end_io:
3023 bio_endio(bio, nr_sectors << 9, err);
3024 return 0;
3025}
3026
3027/*
3028 * If bio->bi_bdev is a partition, remap the location
3029 */
3030static inline void blk_partition_remap(struct bio *bio)
3031{
3032 struct block_device *bdev = bio->bi_bdev;
3033
3034 if (bdev != bdev->bd_contains) {
3035 struct hd_struct *p = bdev->bd_part;
Jens Axboea3623572005-11-01 09:26:16 +01003036 const int rw = bio_data_dir(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037
Jens Axboea3623572005-11-01 09:26:16 +01003038 p->sectors[rw] += bio_sectors(bio);
3039 p->ios[rw]++;
3040
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 bio->bi_sector += p->start_sect;
3042 bio->bi_bdev = bdev->bd_contains;
3043 }
3044}
3045
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046static void handle_bad_sector(struct bio *bio)
3047{
3048 char b[BDEVNAME_SIZE];
3049
3050 printk(KERN_INFO "attempt to access beyond end of device\n");
3051 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
3052 bdevname(bio->bi_bdev, b),
3053 bio->bi_rw,
3054 (unsigned long long)bio->bi_sector + bio_sectors(bio),
3055 (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
3056
3057 set_bit(BIO_EOF, &bio->bi_flags);
3058}
3059
Akinobu Mitac17bb492006-12-08 02:39:46 -08003060#ifdef CONFIG_FAIL_MAKE_REQUEST
3061
3062static DECLARE_FAULT_ATTR(fail_make_request);
3063
3064static int __init setup_fail_make_request(char *str)
3065{
3066 return setup_fault_attr(&fail_make_request, str);
3067}
3068__setup("fail_make_request=", setup_fail_make_request);
3069
3070static int should_fail_request(struct bio *bio)
3071{
3072 if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
3073 (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
3074 return should_fail(&fail_make_request, bio->bi_size);
3075
3076 return 0;
3077}
3078
3079static int __init fail_make_request_debugfs(void)
3080{
3081 return init_fault_attr_dentries(&fail_make_request,
3082 "fail_make_request");
3083}
3084
3085late_initcall(fail_make_request_debugfs);
3086
3087#else /* CONFIG_FAIL_MAKE_REQUEST */
3088
3089static inline int should_fail_request(struct bio *bio)
3090{
3091 return 0;
3092}
3093
3094#endif /* CONFIG_FAIL_MAKE_REQUEST */
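
/*
 * Illustrative note (not part of the original file): with
 * CONFIG_FAIL_MAKE_REQUEST enabled, request failures can be injected via
 * the boot parameter registered above, e.g. (format is an assumption):
 *
 *	fail_make_request=<interval>,<probability>,<space>,<times>
 *
 * and tuned at runtime through the debugfs entries created by
 * init_fault_attr_dentries(), conventionally under
 * /sys/kernel/debug/fail_make_request/.  A disk (GENHD_FL_FAIL) or a
 * partition with make_it_fail set must also be marked, or
 * should_fail_request() ignores the bio.
 */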
3095
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096/**
3097 * generic_make_request: hand a buffer to its device driver for I/O
3098 * @bio: The bio describing the location in memory and on the device.
3099 *
3100 * generic_make_request() is used to make I/O requests of block
3101 * devices. It is passed a &struct bio, which describes the I/O that needs
3102 * to be done.
3103 *
3104 * generic_make_request() does not return any status. The
3105 * success/failure status of the request, along with notification of
3106 * completion, is delivered asynchronously through the bio->bi_end_io
3107 * function described (one day) elsewhere.
3108 *
3109 * The caller of generic_make_request must make sure that bi_io_vec
3110 * is set to describe the memory buffer, that bi_bdev and bi_sector are
3111 * set to describe the device address, and that
3112 * bi_end_io and optionally bi_private are set to describe how
3113 * completion notification should be signaled.
3114 *
3115 * generic_make_request and the drivers it calls may use bi_next if this
3116 * bio happens to be merged with someone else, and may change bi_bdev and
3117 * bi_sector for remaps as they see fit. So the values of these fields
3118 * should NOT be depended on after the call to generic_make_request.
3119 */
3120void generic_make_request(struct bio *bio)
3121{
3122 request_queue_t *q;
3123 sector_t maxsector;
NeilBrown5ddfe962006-10-30 22:07:21 -08003124 sector_t old_sector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 int ret, nr_sectors = bio_sectors(bio);
Jens Axboe2056a782006-03-23 20:00:26 +01003126 dev_t old_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127
3128 might_sleep();
3129 /* Test device or partition size, when known. */
3130 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
3131 if (maxsector) {
3132 sector_t sector = bio->bi_sector;
3133
3134 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
3135 /*
3136 * This may well happen - the kernel calls bread()
3137 * without checking the size of the device, e.g., when
3138 * mounting a device.
3139 */
3140 handle_bad_sector(bio);
3141 goto end_io;
3142 }
3143 }
3144
3145 /*
3146 * Resolve the mapping until finished. (drivers are
3147 * still free to implement/resolve their own stacking
3148 * by explicitly returning 0)
3149 *
3150 * NOTE: we don't repeat the blk_size check for each new device.
3151 * Stacking drivers are expected to know what they are doing.
3152 */
NeilBrown5ddfe962006-10-30 22:07:21 -08003153 old_sector = -1;
Jens Axboe2056a782006-03-23 20:00:26 +01003154 old_dev = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 do {
3156 char b[BDEVNAME_SIZE];
3157
3158 q = bdev_get_queue(bio->bi_bdev);
3159 if (!q) {
3160 printk(KERN_ERR
3161 "generic_make_request: Trying to access "
3162 "nonexistent block-device %s (%Lu)\n",
3163 bdevname(bio->bi_bdev, b),
3164 (long long) bio->bi_sector);
3165end_io:
3166 bio_endio(bio, bio->bi_size, -EIO);
3167 break;
3168 }
3169
3170 if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
3171 printk("bio too big device %s (%u > %u)\n",
3172 bdevname(bio->bi_bdev, b),
3173 bio_sectors(bio),
3174 q->max_hw_sectors);
3175 goto end_io;
3176 }
3177
Nick Pigginfde6ad22005-06-23 00:08:53 -07003178 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 goto end_io;
3180
Akinobu Mitac17bb492006-12-08 02:39:46 -08003181 if (should_fail_request(bio))
3182 goto end_io;
3183
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 /*
3185 * If this device has partitions, remap block n
3186 * of partition p to block n+start(p) of the disk.
3187 */
3188 blk_partition_remap(bio);
3189
NeilBrown5ddfe962006-10-30 22:07:21 -08003190 if (old_sector != -1)
Jens Axboe2056a782006-03-23 20:00:26 +01003191 blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
NeilBrown5ddfe962006-10-30 22:07:21 -08003192 old_sector);
Jens Axboe2056a782006-03-23 20:00:26 +01003193
3194 blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
3195
NeilBrown5ddfe962006-10-30 22:07:21 -08003196 old_sector = bio->bi_sector;
Jens Axboe2056a782006-03-23 20:00:26 +01003197 old_dev = bio->bi_bdev->bd_dev;
3198
NeilBrown5ddfe962006-10-30 22:07:21 -08003199 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
3200 if (maxsector) {
3201 sector_t sector = bio->bi_sector;
3202
Andrew Mortondf66b852006-11-02 22:06:56 -08003203 if (maxsector < nr_sectors ||
3204 maxsector - nr_sectors < sector) {
NeilBrown5ddfe962006-10-30 22:07:21 -08003205 /*
Andrew Mortondf66b852006-11-02 22:06:56 -08003206 * This may well happen - partitions are not
3207 * checked to make sure they are within the size
3208 * of the whole device.
NeilBrown5ddfe962006-10-30 22:07:21 -08003209 */
3210 handle_bad_sector(bio);
3211 goto end_io;
3212 }
3213 }
3214
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 ret = q->make_request_fn(q, bio);
3216 } while (ret);
3217}
3218
3219EXPORT_SYMBOL(generic_make_request);
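
/*
 * Illustrative sketch (not part of the original file): a minimal
 * make_request_fn for a hypothetical stacking driver.  All "example_"
 * names and the per-queue data layout are assumptions.  Returning
 * non-zero asks the loop in generic_make_request() above to resubmit the
 * now-remapped bio; returning 0 means the bio was handled or queued here.
 */
struct example_target {
	struct block_device *lower_bdev;	/* device we stack on top of */
	sector_t start_sect;			/* offset into the lower device */
};

static int example_stack_make_request(request_queue_t *q, struct bio *bio)
{
	struct example_target *t = q->queuedata;

	bio->bi_bdev = t->lower_bdev;		/* redirect to the lower device */
	bio->bi_sector += t->start_sect;	/* shift into the lower device */

	return 1;	/* have generic_make_request() resubmit the bio */
}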
3220
3221/**
3222 * submit_bio: submit a bio to the block device layer for I/O
3223 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
3224 * @bio: The &struct bio which describes the I/O
3225 *
3226 * submit_bio() is very similar in purpose to generic_make_request(), and
3227 * uses that function to do most of the work. Both are fairly rough
3228 * interfaces, @bio must be presetup and ready for I/O.
3229 *
3230 */
3231void submit_bio(int rw, struct bio *bio)
3232{
3233 int count = bio_sectors(bio);
3234
3235 BIO_BUG_ON(!bio->bi_size);
3236 BIO_BUG_ON(!bio->bi_io_vec);
Jens Axboe22e2c502005-06-27 10:55:12 +02003237 bio->bi_rw |= rw;
Andrew Mortonfaccbd4b2006-12-10 02:19:35 -08003238 if (rw & WRITE) {
Christoph Lameterf8891e52006-06-30 01:55:45 -07003239 count_vm_events(PGPGOUT, count);
Andrew Mortonfaccbd4b2006-12-10 02:19:35 -08003240 } else {
3241 task_io_account_read(bio->bi_size);
Christoph Lameterf8891e52006-06-30 01:55:45 -07003242 count_vm_events(PGPGIN, count);
Andrew Mortonfaccbd4b2006-12-10 02:19:35 -08003243 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244
3245 if (unlikely(block_dump)) {
3246 char b[BDEVNAME_SIZE];
3247 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
3248 current->comm, current->pid,
3249 (rw & WRITE) ? "WRITE" : "READ",
3250 (unsigned long long)bio->bi_sector,
3251 bdevname(bio->bi_bdev,b));
3252 }
3253
3254 generic_make_request(bio);
3255}
3256
3257EXPORT_SYMBOL(submit_bio);
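
/*
 * Illustrative sketch (not part of the original file): building and
 * submitting a single-page read with submit_bio().  The end_io callback
 * follows this kernel's three-argument bi_end_io convention; the
 * "example_" names are assumptions and error handling is omitted.
 */
static int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;		/* only part of the bio is done so far */

	complete(bio->bi_private);	/* whole bio finished, wake the waiter */
	return 0;
}

static void example_read_page(struct block_device *bdev, sector_t sector,
			      struct page *page, struct completion *done)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;
	bio->bi_private = done;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	submit_bio(READ, bio);
	/* the caller waits on @done and then drops the bio with bio_put() */
}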
3258
Adrian Bunk93d17d32005-06-25 14:59:10 -07003259static void blk_recalc_rq_segments(struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260{
3261 struct bio *bio, *prevbio = NULL;
3262 int nr_phys_segs, nr_hw_segs;
3263 unsigned int phys_size, hw_size;
3264 request_queue_t *q = rq->q;
3265
3266 if (!rq->bio)
3267 return;
3268
3269 phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
3270 rq_for_each_bio(bio, rq) {
3271 /* Force bio hw/phys segs to be recalculated. */
3272 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
3273
3274 nr_phys_segs += bio_phys_segments(q, bio);
3275 nr_hw_segs += bio_hw_segments(q, bio);
3276 if (prevbio) {
3277 int pseg = phys_size + prevbio->bi_size + bio->bi_size;
3278 int hseg = hw_size + prevbio->bi_size + bio->bi_size;
3279
3280 if (blk_phys_contig_segment(q, prevbio, bio) &&
3281 pseg <= q->max_segment_size) {
3282 nr_phys_segs--;
3283 phys_size += prevbio->bi_size + bio->bi_size;
3284 } else
3285 phys_size = 0;
3286
3287 if (blk_hw_contig_segment(q, prevbio, bio) &&
3288 hseg <= q->max_segment_size) {
3289 nr_hw_segs--;
3290 hw_size += prevbio->bi_size + bio->bi_size;
3291 } else
3292 hw_size = 0;
3293 }
3294 prevbio = bio;
3295 }
3296
3297 rq->nr_phys_segments = nr_phys_segs;
3298 rq->nr_hw_segments = nr_hw_segs;
3299}
3300
Adrian Bunk93d17d32005-06-25 14:59:10 -07003301static void blk_recalc_rq_sectors(struct request *rq, int nsect)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302{
3303 if (blk_fs_request(rq)) {
3304 rq->hard_sector += nsect;
3305 rq->hard_nr_sectors -= nsect;
3306
3307 /*
3308 * Move the I/O submission pointers ahead if required.
3309 */
3310 if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
3311 (rq->sector <= rq->hard_sector)) {
3312 rq->sector = rq->hard_sector;
3313 rq->nr_sectors = rq->hard_nr_sectors;
3314 rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
3315 rq->current_nr_sectors = rq->hard_cur_sectors;
3316 rq->buffer = bio_data(rq->bio);
3317 }
3318
3319 /*
3320 * if total number of sectors is less than the first segment
3321 * size, something has gone terribly wrong
3322 */
3323 if (rq->nr_sectors < rq->current_nr_sectors) {
3324 printk("blk: request botched\n");
3325 rq->nr_sectors = rq->current_nr_sectors;
3326 }
3327 }
3328}
3329
3330static int __end_that_request_first(struct request *req, int uptodate,
3331 int nr_bytes)
3332{
3333 int total_bytes, bio_nbytes, error, next_idx = 0;
3334 struct bio *bio;
3335
Jens Axboe2056a782006-03-23 20:00:26 +01003336 blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
3337
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338 /*
3339	 * extend the uptodate bool to allow a < 0 value to signal a direct io error
3340 */
3341 error = 0;
3342 if (end_io_error(uptodate))
3343 error = !uptodate ? -EIO : uptodate;
3344
3345 /*
3346 * for a REQ_BLOCK_PC request, we want to carry any eventual
3347 * sense key with us all the way through
3348 */
3349 if (!blk_pc_request(req))
3350 req->errors = 0;
3351
3352 if (!uptodate) {
Jens Axboe4aff5e22006-08-10 08:44:47 +02003353 if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 printk("end_request: I/O error, dev %s, sector %llu\n",
3355 req->rq_disk ? req->rq_disk->disk_name : "?",
3356 (unsigned long long)req->sector);
3357 }
3358
Jens Axboed72d9042005-11-01 08:35:42 +01003359 if (blk_fs_request(req) && req->rq_disk) {
Jens Axboea3623572005-11-01 09:26:16 +01003360 const int rw = rq_data_dir(req);
3361
Jens Axboe53e86062006-01-17 11:09:27 +01003362 disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
Jens Axboed72d9042005-11-01 08:35:42 +01003363 }
3364
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 total_bytes = bio_nbytes = 0;
3366 while ((bio = req->bio) != NULL) {
3367 int nbytes;
3368
3369 if (nr_bytes >= bio->bi_size) {
3370 req->bio = bio->bi_next;
3371 nbytes = bio->bi_size;
Tejun Heo797e7db2006-01-06 09:51:03 +01003372 if (!ordered_bio_endio(req, bio, nbytes, error))
3373 bio_endio(bio, nbytes, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 next_idx = 0;
3375 bio_nbytes = 0;
3376 } else {
3377 int idx = bio->bi_idx + next_idx;
3378
3379 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
3380 blk_dump_rq_flags(req, "__end_that");
3381 printk("%s: bio idx %d >= vcnt %d\n",
3382 __FUNCTION__,
3383 bio->bi_idx, bio->bi_vcnt);
3384 break;
3385 }
3386
3387 nbytes = bio_iovec_idx(bio, idx)->bv_len;
3388 BIO_BUG_ON(nbytes > bio->bi_size);
3389
3390 /*
3391 * not a complete bvec done
3392 */
3393 if (unlikely(nbytes > nr_bytes)) {
3394 bio_nbytes += nr_bytes;
3395 total_bytes += nr_bytes;
3396 break;
3397 }
3398
3399 /*
3400 * advance to the next vector
3401 */
3402 next_idx++;
3403 bio_nbytes += nbytes;
3404 }
3405
3406 total_bytes += nbytes;
3407 nr_bytes -= nbytes;
3408
3409 if ((bio = req->bio)) {
3410 /*
3411 * end more in this run, or just return 'not-done'
3412 */
3413 if (unlikely(nr_bytes <= 0))
3414 break;
3415 }
3416 }
3417
3418 /*
3419 * completely done
3420 */
3421 if (!req->bio)
3422 return 0;
3423
3424 /*
3425 * if the request wasn't completed, update state
3426 */
3427 if (bio_nbytes) {
Tejun Heo797e7db2006-01-06 09:51:03 +01003428 if (!ordered_bio_endio(req, bio, bio_nbytes, error))
3429 bio_endio(bio, bio_nbytes, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430 bio->bi_idx += next_idx;
3431 bio_iovec(bio)->bv_offset += nr_bytes;
3432 bio_iovec(bio)->bv_len -= nr_bytes;
3433 }
3434
3435 blk_recalc_rq_sectors(req, total_bytes >> 9);
3436 blk_recalc_rq_segments(req);
3437 return 1;
3438}
3439
3440/**
3441 * end_that_request_first - end I/O on a request
3442 * @req: the request being processed
3443 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
3444 * @nr_sectors: number of sectors to end I/O on
3445 *
3446 * Description:
3447 * Ends I/O on a number of sectors attached to @req, and sets it up
3448 * for the next range of segments (if any) in the cluster.
3449 *
3450 * Return:
3451 * 0 - we are done with this request, call end_that_request_last()
3452 * 1 - still buffers pending for this request
3453 **/
3454int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
3455{
3456 return __end_that_request_first(req, uptodate, nr_sectors << 9);
3457}
3458
3459EXPORT_SYMBOL(end_that_request_first);
3460
3461/**
3462 * end_that_request_chunk - end I/O on a request
3463 * @req: the request being processed
3464 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
3465 * @nr_bytes: number of bytes to complete
3466 *
3467 * Description:
3468 * Ends I/O on a number of bytes attached to @req, and sets it up
3469 * for the next range of segments (if any). Like end_that_request_first(),
3470 * but deals with bytes instead of sectors.
3471 *
3472 * Return:
3473 * 0 - we are done with this request, call end_that_request_last()
3474 * 1 - still buffers pending for this request
3475 **/
3476int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
3477{
3478 return __end_that_request_first(req, uptodate, nr_bytes);
3479}
3480
3481EXPORT_SYMBOL(end_that_request_chunk);
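
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * retire @bytes of a request it dequeued earlier.  end_that_request_last()
 * needs the queue lock; end_that_request_chunk() is called here without
 * it.  The "example_" name is an assumption.
 */
static void example_complete_bytes(struct request *rq, int uptodate,
				   unsigned int bytes)
{
	request_queue_t *q = rq->q;
	unsigned long flags;

	if (end_that_request_chunk(rq, uptodate, bytes))
		return;			/* more segments still pending */

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, uptodate);	/* all done, finish it */
	spin_unlock_irqrestore(q->queue_lock, flags);
}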
3482
3483/*
Jens Axboeff856ba2006-01-09 16:02:34 +01003484 * splice the completion data to a local structure and hand off to
3485 * the queue's softirq_done_fn() to complete the requests
3486 */
3487static void blk_done_softirq(struct softirq_action *h)
3488{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07003489 struct list_head *cpu_list, local_list;
Jens Axboeff856ba2006-01-09 16:02:34 +01003490
3491 local_irq_disable();
3492 cpu_list = &__get_cpu_var(blk_cpu_done);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07003493 list_replace_init(cpu_list, &local_list);
Jens Axboeff856ba2006-01-09 16:02:34 +01003494 local_irq_enable();
3495
3496 while (!list_empty(&local_list)) {
3497 struct request *rq = list_entry(local_list.next, struct request, donelist);
3498
3499 list_del_init(&rq->donelist);
3500 rq->q->softirq_done_fn(rq);
3501 }
3502}
3503
Jens Axboeff856ba2006-01-09 16:02:34 +01003504static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
3505 void *hcpu)
3506{
3507 /*
3508 * If a CPU goes away, splice its entries to the current CPU
3509 * and trigger a run of the softirq
3510 */
3511 if (action == CPU_DEAD) {
3512 int cpu = (unsigned long) hcpu;
3513
3514 local_irq_disable();
3515 list_splice_init(&per_cpu(blk_cpu_done, cpu),
3516 &__get_cpu_var(blk_cpu_done));
3517 raise_softirq_irqoff(BLOCK_SOFTIRQ);
3518 local_irq_enable();
3519 }
3520
3521 return NOTIFY_OK;
3522}
3523
3524
Chandra Seetharaman054cc8a2006-06-27 02:54:07 -07003525static struct notifier_block __devinitdata blk_cpu_notifier = {
Jens Axboeff856ba2006-01-09 16:02:34 +01003526 .notifier_call = blk_cpu_notify,
3527};
3528
Jens Axboeff856ba2006-01-09 16:02:34 +01003529/**
3530 * blk_complete_request - end I/O on a request
3531 * @req: the request being processed
3532 *
3533 * Description:
3534 * Ends all I/O on a request. It does not handle partial completions,
Andreas Mohrd6e05ed2006-06-26 18:35:02 +02003535 * unless the driver actually implements this in its completion callback
Jens Axboeff856ba2006-01-09 16:02:34 +01003536 *     through requeueing. The actual completion happens out-of-order,
3537 * through a softirq handler. The user must have registered a completion
3538 * callback through blk_queue_softirq_done().
3539 **/
3540
3541void blk_complete_request(struct request *req)
3542{
3543 struct list_head *cpu_list;
3544 unsigned long flags;
3545
3546 BUG_ON(!req->q->softirq_done_fn);
3547
3548 local_irq_save(flags);
3549
3550 cpu_list = &__get_cpu_var(blk_cpu_done);
3551 list_add_tail(&req->donelist, cpu_list);
3552 raise_softirq_irqoff(BLOCK_SOFTIRQ);
3553
3554 local_irq_restore(flags);
3555}
3556
3557EXPORT_SYMBOL(blk_complete_request);
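
/*
 * Illustrative sketch (not part of the original file): deferring request
 * completion to softirq context.  The driver registers a softirq_done
 * callback at init time and then only calls blk_complete_request() from
 * its interrupt handler.  "example_" names are assumptions; a real driver
 * would track how many sectors actually completed.
 */
static void example_softirq_done(struct request *rq)
{
	request_queue_t *q = rq->q;
	unsigned long flags;

	/* runs from the block softirq, not from the hardware interrupt */
	if (!end_that_request_first(rq, 1, rq->hard_nr_sectors)) {
		spin_lock_irqsave(q->queue_lock, flags);
		end_that_request_last(rq, 1);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/* at init:   blk_queue_softirq_done(q, example_softirq_done);	*/
/* from IRQ:  blk_complete_request(rq);				*/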
3558
3559/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 * queue lock must be held
3561 */
Tejun Heo8ffdc652006-01-06 09:49:03 +01003562void end_that_request_last(struct request *req, int uptodate)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563{
3564 struct gendisk *disk = req->rq_disk;
Tejun Heo8ffdc652006-01-06 09:49:03 +01003565 int error;
3566
3567 /*
3568	 * extend the uptodate bool to allow a < 0 value to signal a direct io error
3569 */
3570 error = 0;
3571 if (end_io_error(uptodate))
3572 error = !uptodate ? -EIO : uptodate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573
3574 if (unlikely(laptop_mode) && blk_fs_request(req))
3575 laptop_io_completion();
3576
Jens Axboefd0ff8a2006-05-23 11:23:49 +02003577 /*
3578	 * Account IO completion. bar_rq isn't accounted as a normal
3579	 * IO on either queueing or completion. Accounting the containing
3580 * request is enough.
3581 */
3582 if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 unsigned long duration = jiffies - req->start_time;
Jens Axboea3623572005-11-01 09:26:16 +01003584 const int rw = rq_data_dir(req);
3585
3586 __disk_stat_inc(disk, ios[rw]);
3587 __disk_stat_add(disk, ticks[rw], duration);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588 disk_round_stats(disk);
3589 disk->in_flight--;
3590 }
3591 if (req->end_io)
Tejun Heo8ffdc652006-01-06 09:49:03 +01003592 req->end_io(req, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 else
3594 __blk_put_request(req->q, req);
3595}
3596
3597EXPORT_SYMBOL(end_that_request_last);
3598
3599void end_request(struct request *req, int uptodate)
3600{
3601 if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
3602 add_disk_randomness(req->rq_disk);
3603 blkdev_dequeue_request(req);
Tejun Heo8ffdc652006-01-06 09:49:03 +01003604 end_that_request_last(req, uptodate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 }
3606}
3607
3608EXPORT_SYMBOL(end_request);
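
/*
 * Illustrative sketch (not part of the original file): the classic
 * synchronous request_fn built on end_request().  request_fn is entered
 * with the queue lock held, which is what end_request() expects.
 * example_transfer() and the use of q->queuedata are assumptions.
 */
static void example_transfer(void *dev, sector_t sector, char *buf,
			     unsigned int nsect, int write);	/* hypothetical */

static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			end_request(rq, 0);	/* fail non-fs requests */
			continue;
		}
		/* move current_nr_sectors starting at rq->sector */
		example_transfer(q->queuedata, rq->sector, rq->buffer,
				 rq->current_nr_sectors, rq_data_dir(rq));
		end_request(rq, 1);	/* partial completions re-queue rq */
	}
}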
3609
3610void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
3611{
Jens Axboe4aff5e22006-08-10 08:44:47 +02003612 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
3613 rq->cmd_flags |= (bio->bi_rw & 3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614
3615 rq->nr_phys_segments = bio_phys_segments(q, bio);
3616 rq->nr_hw_segments = bio_hw_segments(q, bio);
3617 rq->current_nr_sectors = bio_cur_sectors(bio);
3618 rq->hard_cur_sectors = rq->current_nr_sectors;
3619 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
3620 rq->buffer = bio_data(bio);
Mike Christie0e75f902006-12-01 10:40:55 +01003621 rq->data_len = bio->bi_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
3623 rq->bio = rq->biotail = bio;
3624}
3625
3626EXPORT_SYMBOL(blk_rq_bio_prep);
3627
3628int kblockd_schedule_work(struct work_struct *work)
3629{
3630 return queue_work(kblockd_workqueue, work);
3631}
3632
3633EXPORT_SYMBOL(kblockd_schedule_work);
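
/*
 * Illustrative sketch (not part of the original file): deferring work to
 * kblockd.  The work_struct style matches this file's own unplug handling;
 * "example_" names are assumptions.
 */
struct example_dev {
	struct work_struct work;
	struct request_queue *queue;
};

static void example_work_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);

	/* runs in process context on the kblockd workqueue */
	blk_run_queue(dev->queue);
}

/* at init:           INIT_WORK(&dev->work, example_work_fn);	*/
/* from atomic code:  kblockd_schedule_work(&dev->work);	*/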
3634
3635void kblockd_flush(void)
3636{
3637 flush_workqueue(kblockd_workqueue);
3638}
3639EXPORT_SYMBOL(kblockd_flush);
3640
3641int __init blk_dev_init(void)
3642{
Jens Axboeff856ba2006-01-09 16:02:34 +01003643 int i;
3644
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645 kblockd_workqueue = create_workqueue("kblockd");
3646 if (!kblockd_workqueue)
3647 panic("Failed to create kblockd\n");
3648
3649 request_cachep = kmem_cache_create("blkdev_requests",
3650 sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
3651
3652 requestq_cachep = kmem_cache_create("blkdev_queue",
3653 sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
3654
3655 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3656 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3657
KAMEZAWA Hiroyuki0a945022006-03-28 01:56:37 -08003658 for_each_possible_cpu(i)
Jens Axboeff856ba2006-01-09 16:02:34 +01003659 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
3660
3661 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
Chandra Seetharaman5a67e4c2006-06-27 02:54:11 -07003662 register_hotcpu_notifier(&blk_cpu_notifier);
Jens Axboeff856ba2006-01-09 16:02:34 +01003663
Vasily Tarasovf772b3d2007-03-27 08:52:47 +02003664 blk_max_low_pfn = max_low_pfn - 1;
3665 blk_max_pfn = max_pfn - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666
3667 return 0;
3668}
3669
3670/*
3671 * IO Context helper functions
3672 */
3673void put_io_context(struct io_context *ioc)
3674{
3675 if (ioc == NULL)
3676 return;
3677
3678 BUG_ON(atomic_read(&ioc->refcount) == 0);
3679
3680 if (atomic_dec_and_test(&ioc->refcount)) {
Jens Axboee2d74ac2006-03-28 08:59:01 +02003681 struct cfq_io_context *cic;
3682
Al Viro334e94d2006-03-18 15:05:53 -05003683 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 if (ioc->aic && ioc->aic->dtor)
3685 ioc->aic->dtor(ioc->aic);
Jens Axboee2d74ac2006-03-28 08:59:01 +02003686 if (ioc->cic_root.rb_node != NULL) {
Jens Axboe7143dd42006-03-28 09:00:28 +02003687 struct rb_node *n = rb_first(&ioc->cic_root);
3688
3689 cic = rb_entry(n, struct cfq_io_context, rb_node);
Jens Axboee2d74ac2006-03-28 08:59:01 +02003690 cic->dtor(ioc);
3691 }
Al Viro334e94d2006-03-18 15:05:53 -05003692 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693
3694 kmem_cache_free(iocontext_cachep, ioc);
3695 }
3696}
3697EXPORT_SYMBOL(put_io_context);
3698
3699/* Called by the exiting task */
3700void exit_io_context(void)
3701{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702 struct io_context *ioc;
Jens Axboee2d74ac2006-03-28 08:59:01 +02003703 struct cfq_io_context *cic;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
Jens Axboe22e2c502005-06-27 10:55:12 +02003705 task_lock(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 ioc = current->io_context;
3707 current->io_context = NULL;
Jens Axboe22e2c502005-06-27 10:55:12 +02003708 task_unlock(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
Oleg Nesterov25034d72006-08-29 09:15:14 +02003710 ioc->task = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711 if (ioc->aic && ioc->aic->exit)
3712 ioc->aic->exit(ioc->aic);
Jens Axboee2d74ac2006-03-28 08:59:01 +02003713 if (ioc->cic_root.rb_node != NULL) {
3714 cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
3715 cic->exit(ioc);
3716 }
Oleg Nesterov25034d72006-08-29 09:15:14 +02003717
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718 put_io_context(ioc);
3719}
3720
3721/*
3722 * If the current task has no IO context then create one and initialise it.
Nick Pigginfb3cc432005-06-28 20:45:15 -07003723 * Otherwise, return its existing IO context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 *
Nick Pigginfb3cc432005-06-28 20:45:15 -07003725 * This returned IO context doesn't have a specifically elevated refcount,
3726 * but since the current task itself holds a reference, the context can be
3727 * used in general code, so long as it stays within `current` context.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728 */
Jens Axboeb5deef92006-07-19 23:39:40 +02003729static struct io_context *current_io_context(gfp_t gfp_flags, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730{
3731 struct task_struct *tsk = current;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732 struct io_context *ret;
3733
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734 ret = tsk->io_context;
Nick Pigginfb3cc432005-06-28 20:45:15 -07003735 if (likely(ret))
3736 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
Jens Axboeb5deef92006-07-19 23:39:40 +02003738 ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 if (ret) {
3740 atomic_set(&ret->refcount, 1);
Jens Axboe22e2c502005-06-27 10:55:12 +02003741 ret->task = current;
Jens Axboefc463792006-08-29 09:05:44 +02003742 ret->ioprio_changed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 ret->last_waited = jiffies; /* doesn't matter... */
3744 ret->nr_batch_requests = 0; /* because this is 0 */
3745 ret->aic = NULL;
Jens Axboee2d74ac2006-03-28 08:59:01 +02003746 ret->cic_root.rb_node = NULL;
Jens Axboe4e521c22007-04-24 21:17:33 +02003747 ret->ioc_data = NULL;
Oleg Nesterov9f83e452006-08-21 08:34:15 +02003748 /* make sure set_task_ioprio() sees the settings above */
3749 smp_wmb();
Nick Pigginfb3cc432005-06-28 20:45:15 -07003750 tsk->io_context = ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 }
3752
3753 return ret;
3754}
Nick Pigginfb3cc432005-06-28 20:45:15 -07003755EXPORT_SYMBOL(current_io_context);
3756
3757/*
3758 * If the current task has no IO context then create one and initialise it.
3759 * If it does have a context, take a ref on it.
3760 *
3761 * This is always called in the context of the task which submitted the I/O.
3762 */
Jens Axboeb5deef92006-07-19 23:39:40 +02003763struct io_context *get_io_context(gfp_t gfp_flags, int node)
Nick Pigginfb3cc432005-06-28 20:45:15 -07003764{
3765 struct io_context *ret;
Jens Axboeb5deef92006-07-19 23:39:40 +02003766 ret = current_io_context(gfp_flags, node);
Nick Pigginfb3cc432005-06-28 20:45:15 -07003767 if (likely(ret))
3768 atomic_inc(&ret->refcount);
3769 return ret;
3770}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771EXPORT_SYMBOL(get_io_context);
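
/*
 * Illustrative sketch (not part of the original file): taking a counted
 * reference on the submitting task's io_context and dropping it later.
 * A real caller would normally pass the queue's NUMA node instead of -1;
 * the -1 ("any node") here is an assumption.
 */
static void example_hold_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_NOIO, -1);

	if (!ioc)
		return;			/* allocation failed */

	/* ... stash ioc in a request or per-device structure ... */

	put_io_context(ioc);		/* drop the reference when done */
}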
3772
3773void copy_io_context(struct io_context **pdst, struct io_context **psrc)
3774{
3775 struct io_context *src = *psrc;
3776 struct io_context *dst = *pdst;
3777
3778 if (src) {
3779 BUG_ON(atomic_read(&src->refcount) == 0);
3780 atomic_inc(&src->refcount);
3781 put_io_context(dst);
3782 *pdst = src;
3783 }
3784}
3785EXPORT_SYMBOL(copy_io_context);
3786
3787void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
3788{
3789 struct io_context *temp;
3790 temp = *ioc1;
3791 *ioc1 = *ioc2;
3792 *ioc2 = temp;
3793}
3794EXPORT_SYMBOL(swap_io_context);
3795
3796/*
3797 * sysfs parts below
3798 */
3799struct queue_sysfs_entry {
3800 struct attribute attr;
3801 ssize_t (*show)(struct request_queue *, char *);
3802 ssize_t (*store)(struct request_queue *, const char *, size_t);
3803};
3804
3805static ssize_t
3806queue_var_show(unsigned int var, char *page)
3807{
3808 return sprintf(page, "%d\n", var);
3809}
3810
3811static ssize_t
3812queue_var_store(unsigned long *var, const char *page, size_t count)
3813{
3814 char *p = (char *) page;
3815
3816 *var = simple_strtoul(p, &p, 10);
3817 return count;
3818}
3819
3820static ssize_t queue_requests_show(struct request_queue *q, char *page)
3821{
3822 return queue_var_show(q->nr_requests, (page));
3823}
3824
3825static ssize_t
3826queue_requests_store(struct request_queue *q, const char *page, size_t count)
3827{
3828 struct request_list *rl = &q->rq;
Al Viroc981ff92006-03-18 13:51:29 -05003829 unsigned long nr;
3830 int ret = queue_var_store(&nr, page, count);
3831 if (nr < BLKDEV_MIN_RQ)
3832 nr = BLKDEV_MIN_RQ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833
Al Viroc981ff92006-03-18 13:51:29 -05003834 spin_lock_irq(q->queue_lock);
3835 q->nr_requests = nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 blk_queue_congestion_threshold(q);
3837
3838 if (rl->count[READ] >= queue_congestion_on_threshold(q))
Thomas Maier79e2de42006-10-19 23:28:15 -07003839 blk_set_queue_congested(q, READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 else if (rl->count[READ] < queue_congestion_off_threshold(q))
Thomas Maier79e2de42006-10-19 23:28:15 -07003841 blk_clear_queue_congested(q, READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842
3843 if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
Thomas Maier79e2de42006-10-19 23:28:15 -07003844 blk_set_queue_congested(q, WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845 else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
Thomas Maier79e2de42006-10-19 23:28:15 -07003846 blk_clear_queue_congested(q, WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847
3848 if (rl->count[READ] >= q->nr_requests) {
3849 blk_set_queue_full(q, READ);
3850 } else if (rl->count[READ]+1 <= q->nr_requests) {
3851 blk_clear_queue_full(q, READ);
3852 wake_up(&rl->wait[READ]);
3853 }
3854
3855 if (rl->count[WRITE] >= q->nr_requests) {
3856 blk_set_queue_full(q, WRITE);
3857 } else if (rl->count[WRITE]+1 <= q->nr_requests) {
3858 blk_clear_queue_full(q, WRITE);
3859 wake_up(&rl->wait[WRITE]);
3860 }
Al Viroc981ff92006-03-18 13:51:29 -05003861 spin_unlock_irq(q->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862 return ret;
3863}
3864
3865static ssize_t queue_ra_show(struct request_queue *q, char *page)
3866{
3867 int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
3868
3869 return queue_var_show(ra_kb, (page));
3870}
3871
3872static ssize_t
3873queue_ra_store(struct request_queue *q, const char *page, size_t count)
3874{
3875 unsigned long ra_kb;
3876 ssize_t ret = queue_var_store(&ra_kb, page, count);
3877
3878 spin_lock_irq(q->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
3880 spin_unlock_irq(q->queue_lock);
3881
3882 return ret;
3883}
3884
3885static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
3886{
3887 int max_sectors_kb = q->max_sectors >> 1;
3888
3889 return queue_var_show(max_sectors_kb, (page));
3890}
3891
3892static ssize_t
3893queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
3894{
3895 unsigned long max_sectors_kb,
3896 max_hw_sectors_kb = q->max_hw_sectors >> 1,
3897 page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
3898 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
3899 int ra_kb;
3900
3901 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
3902 return -EINVAL;
3903 /*
3904 * Take the queue lock to update the readahead and max_sectors
3905 * values synchronously:
3906 */
3907 spin_lock_irq(q->queue_lock);
3908 /*
3909 * Trim readahead window as well, if necessary:
3910 */
3911 ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
3912 if (ra_kb > max_sectors_kb)
3913 q->backing_dev_info.ra_pages =
3914 max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
3915
3916 q->max_sectors = max_sectors_kb << 1;
3917 spin_unlock_irq(q->queue_lock);
3918
3919 return ret;
3920}
3921
3922static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
3923{
3924 int max_hw_sectors_kb = q->max_hw_sectors >> 1;
3925
3926 return queue_var_show(max_hw_sectors_kb, (page));
3927}
3928
3929
3930static struct queue_sysfs_entry queue_requests_entry = {
3931 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
3932 .show = queue_requests_show,
3933 .store = queue_requests_store,
3934};
3935
3936static struct queue_sysfs_entry queue_ra_entry = {
3937 .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
3938 .show = queue_ra_show,
3939 .store = queue_ra_store,
3940};
3941
3942static struct queue_sysfs_entry queue_max_sectors_entry = {
3943 .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
3944 .show = queue_max_sectors_show,
3945 .store = queue_max_sectors_store,
3946};
3947
3948static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
3949 .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
3950 .show = queue_max_hw_sectors_show,
3951};
3952
3953static struct queue_sysfs_entry queue_iosched_entry = {
3954 .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
3955 .show = elv_iosched_show,
3956 .store = elv_iosched_store,
3957};
3958
3959static struct attribute *default_attrs[] = {
3960 &queue_requests_entry.attr,
3961 &queue_ra_entry.attr,
3962 &queue_max_hw_sectors_entry.attr,
3963 &queue_max_sectors_entry.attr,
3964 &queue_iosched_entry.attr,
3965 NULL,
3966};
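
/*
 * Illustrative note (not part of the original file): these attributes are
 * exposed per disk under sysfs, conventionally as
 * /sys/block/<disk>/queue/{nr_requests,read_ahead_kb,max_sectors_kb,
 * max_hw_sectors_kb,scheduler}.  Example shell usage (device name assumed):
 *
 *	echo 512      > /sys/block/sda/queue/nr_requests
 *	echo deadline > /sys/block/sda/queue/scheduler
 */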
3967
3968#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
3969
3970static ssize_t
3971queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3972{
3973 struct queue_sysfs_entry *entry = to_queue(attr);
Al Viro483f4af2006-03-18 18:34:37 -05003974 request_queue_t *q = container_of(kobj, struct request_queue, kobj);
3975 ssize_t res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 if (!entry->show)
Dmitry Torokhov6c1852a2005-04-29 01:26:06 -05003978 return -EIO;
Al Viro483f4af2006-03-18 18:34:37 -05003979 mutex_lock(&q->sysfs_lock);
3980 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
3981 mutex_unlock(&q->sysfs_lock);
3982 return -ENOENT;
3983 }
3984 res = entry->show(q, page);
3985 mutex_unlock(&q->sysfs_lock);
3986 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987}
3988
3989static ssize_t
3990queue_attr_store(struct kobject *kobj, struct attribute *attr,
3991 const char *page, size_t length)
3992{
3993 struct queue_sysfs_entry *entry = to_queue(attr);
Al Viro483f4af2006-03-18 18:34:37 -05003994 request_queue_t *q = container_of(kobj, struct request_queue, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995
Al Viro483f4af2006-03-18 18:34:37 -05003996 ssize_t res;
3997
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 if (!entry->store)
Dmitry Torokhov6c1852a2005-04-29 01:26:06 -05003999 return -EIO;
Al Viro483f4af2006-03-18 18:34:37 -05004000 mutex_lock(&q->sysfs_lock);
4001 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
4002 mutex_unlock(&q->sysfs_lock);
4003 return -ENOENT;
4004 }
4005 res = entry->store(q, page, length);
4006 mutex_unlock(&q->sysfs_lock);
4007 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008}
4009
4010static struct sysfs_ops queue_sysfs_ops = {
4011 .show = queue_attr_show,
4012 .store = queue_attr_store,
4013};
4014
Adrian Bunk93d17d32005-06-25 14:59:10 -07004015static struct kobj_type queue_ktype = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016 .sysfs_ops = &queue_sysfs_ops,
4017 .default_attrs = default_attrs,
Al Viro483f4af2006-03-18 18:34:37 -05004018 .release = blk_release_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019};
4020
4021int blk_register_queue(struct gendisk *disk)
4022{
4023 int ret;
4024
4025 request_queue_t *q = disk->queue;
4026
4027 if (!q || !q->request_fn)
4028 return -ENXIO;
4029
4030 q->kobj.parent = kobject_get(&disk->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031
Al Viro483f4af2006-03-18 18:34:37 -05004032 ret = kobject_add(&q->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 if (ret < 0)
4034 return ret;
4035
Al Viro483f4af2006-03-18 18:34:37 -05004036 kobject_uevent(&q->kobj, KOBJ_ADD);
4037
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 ret = elv_register_queue(q);
4039 if (ret) {
Al Viro483f4af2006-03-18 18:34:37 -05004040 kobject_uevent(&q->kobj, KOBJ_REMOVE);
4041 kobject_del(&q->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 return ret;
4043 }
4044
4045 return 0;
4046}
4047
4048void blk_unregister_queue(struct gendisk *disk)
4049{
4050 request_queue_t *q = disk->queue;
4051
4052 if (q && q->request_fn) {
4053 elv_unregister_queue(q);
4054
Al Viro483f4af2006-03-18 18:34:37 -05004055 kobject_uevent(&q->kobj, KOBJ_REMOVE);
4056 kobject_del(&q->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057 kobject_put(&disk->kobj);
4058 }
4059}