/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	- July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	if (!new_io) {
		part = rq->part;
		part_stat_inc(cpu, part, merges[rw]);
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
		part_round_stats(cpu, part);
		part_inc_in_flight(part, rw);
		rq->part = part;
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
	rq->start_time = jiffies;
	set_start_time_ns(rq);
	rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
		bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void blk_delay_work(struct work_struct *work)
{
	struct request_queue *q;

	q = container_of(work, struct request_queue, delay_work.work);
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	queue_delayed_work(kblockd_workqueue, &q->delay_work,
				msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
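
/*
 * Illustrative sketch, not part of the original file: a request_fn that runs
 * out of device resources can requeue the request and back off for a few
 * milliseconds with blk_delay_queue() instead of busy polling.  The
 * "mydev_*" helpers below are hypothetical.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!mydev_can_issue(q->queuedata)) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);
 *				break;
 *			}
 *			mydev_issue(q->queuedata, rq);
 *		}
 *	}
 */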

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
	cancel_delayed_work(&q->delay_work);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
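
/*
 * Illustrative sketch (hypothetical driver code, not from this file): the
 * usual pairing is to stop the queue on a 'queue full' condition from the
 * submission path and restart it from the completion path once the hardware
 * has room again.  Both calls expect the queue lock to be held, as the
 * kernel-doc above states.
 *
 *	in the request_fn, under q->queue_lock:
 *		if (mydev_hw_full(dev)) {
 *			blk_requeue_request(q, rq);
 *			blk_stop_queue(q);
 *			return;
 *		}
 *
 *	in the completion handler, under q->queue_lock with irqs disabled:
 *		if (mydev_hw_has_room(dev))
 *			blk_start_queue(q);
 */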

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 */
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
 *    behalf.
 */
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
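
/*
 * Illustrative sketch (hypothetical names, not from this file): unlike
 * __blk_run_queue(), blk_run_queue() takes the queue lock itself, so it can
 * be used from a context that does not already hold it, e.g. after
 * completing a command outside the request_fn:
 *
 *	static void mydev_complete(struct mydev *dev, struct request *rq)
 *	{
 *		blk_end_request_all(rq, 0);
 *		blk_run_queue(dev->queue);
 *	}
 */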

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
	int i;

	while (true) {
		bool drain = false;

		spin_lock_irq(q->queue_lock);

		/*
		 * The caller might be trying to drain @q before its
		 * elevator is initialized.
		 */
		if (q->elevator)
			elv_drain_elevator(q);

		blkcg_drain_queue(q);

		/*
		 * This function might be called on a queue which failed
		 * driver init after queue creation or which is not yet
		 * fully active.  Some drivers (e.g. fd and loop) get
		 * unhappy in such cases.  Kick the queue iff the dispatch
		 * queue has something on it and @q has request_fn set.
		 */
		if (!list_empty(&q->queue_head) && q->request_fn)
			__blk_run_queue(q);

		drain |= q->nr_rqs_elvpriv;

		/*
		 * Unfortunately, requests are queued at and tracked from
		 * multiple places and there's no single counter which can
		 * be drained.  Check all the queues and counters.
		 */
		if (drain_all) {
			drain |= !list_empty(&q->queue_head);
			for (i = 0; i < 2; i++) {
				drain |= q->nr_rqs[i];
				drain |= q->in_flight[i];
				drain |= !list_empty(&q->flush_queue[i]);
			}
		}

		spin_unlock_irq(q->queue_lock);

		if (!drain)
			break;
		msleep(10);
	}

	/*
	 * With queue marked dead, any woken up waiter will fail the
	 * allocation path, so the wakeup chaining is lost and we're
	 * left with hung waiters. We need to wake up those waiters.
	 */
	if (q->request_fn) {
		struct request_list *rl;

		spin_lock_irq(q->queue_lock);

		blk_queue_for_each_rl(rl, q)
			for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
				wake_up_all(&rl->wait[i]);

		spin_unlock_irq(q->queue_lock);
	}
}

/**
 * blk_queue_bypass_start - enter queue bypass mode
 * @q: queue of interest
 *
 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 * function makes @q enter bypass mode and drains all requests which were
 * throttled or issued before.  On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
 * inside queue or RCU read lock.
 */
void blk_queue_bypass_start(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain) {
		blk_drain_queue(q, false);
		/* ensure blk_queue_bypass() is %true inside RCU read lock */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_start);

/**
 * blk_queue_bypass_end - leave queue bypass mode
 * @q: queue of interest
 *
 * Leave bypass mode and restore the normal queueing behavior.
 */
void blk_queue_bypass_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
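
/*
 * Illustrative sketch (not from this file): bypass start/end must always be
 * paired and may nest via bypass_depth.  Code that needs the elevator and
 * blkcg paths quiesced while it updates queue state typically brackets the
 * update like this (the update function is hypothetical):
 *
 *	blk_queue_bypass_start(q);
 *	mydev_update_queue_policy(q);
 *	blk_queue_bypass_end(q);
 */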

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DEAD, drain all pending requests, destroy and put it.  All
 * future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	spinlock_t *lock = q->queue_lock;

	/* mark @q DEAD, no new request or merges will be allowed afterwards */
	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	spin_lock_irq(lock);

	/*
	 * Dead queue is permanently in bypass mode till released.  Note
	 * that, unlike blk_queue_bypass_start(), we aren't performing
	 * synchronize_rcu() after entering bypass mode to avoid the delay
	 * as some drivers create and destroy a lot of queues while
	 * probing.  This is still safe because blk_release_queue() will be
	 * called only after the queue refcnt drops to zero and nothing,
	 * RCU or not, would be traversing the queue by then.
	 */
	q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);

	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);
	mutex_unlock(&q->sysfs_lock);

	/* drain all requests queued before DEAD marking */
	blk_drain_queue(q, true);

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	blk_sync_queue(q);

	spin_lock_irq(lock);
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;

	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  gfp_mask, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	q->backing_dev_info.name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);
	INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	/*
	 * By default initialize queue_lock to internal lock and driver can
	 * override it later if need be.
	 */
	q->queue_lock = &q->__queue_lock;

	/*
	 * A queue starts its life with bypass turned on to avoid
	 * unnecessary bypass on/off overhead and nasty surprises during
	 * init.  The initial bypass will be finished at the end of
	 * blk_init_allocated_queue().
	 */
	q->bypass_depth = 1;
	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

	if (blkcg_init_queue(q))
		goto fail_id;

	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will be taken also from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
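
/*
 * Illustrative sketch (not from this file) of a driver bringing up a
 * request_fn based queue and pairing it with blk_cleanup_queue() on
 * teardown.  The "mydev"/"dev" names are hypothetical and error handling is
 * abbreviated.
 *
 *	dev->queue = blk_init_queue(mydev_request_fn, &dev->lock);
 *	if (!dev->queue)
 *		return -ENOMEM;
 *	dev->queue->queuedata = dev;
 *	blk_queue_logical_block_size(dev->queue, 512);
 *
 *	on device removal / module unload:
 *		blk_cleanup_queue(dev->queue);
 */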
666
Jens Axboe165125e2007-07-24 09:28:11 +0200667struct request_queue *
Christoph Lameter19460892005-06-23 00:08:19 -0700668blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
669{
Mike Snitzerc86d1b82010-06-03 11:34:52 -0600670 struct request_queue *uninit_q, *q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671
Mike Snitzerc86d1b82010-06-03 11:34:52 -0600672 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
673 if (!uninit_q)
674 return NULL;
675
Mike Snitzer51514122011-11-23 10:59:13 +0100676 q = blk_init_allocated_queue(uninit_q, rfn, lock);
Mike Snitzerc86d1b82010-06-03 11:34:52 -0600677 if (!q)
678 blk_cleanup_queue(uninit_q);
679
680 return q;
Mike Snitzer01effb02010-05-11 08:57:42 +0200681}
682EXPORT_SYMBOL(blk_init_queue_node);
683
684struct request_queue *
685blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
686 spinlock_t *lock)
687{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 if (!q)
689 return NULL;
690
Tejun Heoa0516612012-06-26 15:05:44 -0700691 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
Al Viro8669aaf2006-03-18 13:50:00 -0500692 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
694 q->request_fn = rfn;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 q->prep_rq_fn = NULL;
James Bottomley28018c22010-07-01 19:49:17 +0900696 q->unprep_rq_fn = NULL;
Jens Axboebc58ba92009-01-23 10:54:44 +0100697 q->queue_flags = QUEUE_FLAG_DEFAULT;
Vivek Goyalc94a96a2011-03-02 19:04:42 -0500698
699 /* Override internal queue lock with supplied lock pointer */
700 if (lock)
701 q->queue_lock = lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702
Jens Axboef3b144a2009-03-06 08:48:33 +0100703 /*
704 * This also sets hw/phys segments, boundary and size
705 */
Jens Axboec20e8de2011-09-12 12:03:37 +0200706 blk_queue_make_request(q, blk_queue_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707
Alan Stern44ec9542007-02-20 11:01:57 -0500708 q->sg_reserved_size = INT_MAX;
709
Tejun Heob82d4b12012-04-13 13:11:31 -0700710 /* init elevator */
711 if (elevator_init(q, NULL))
712 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713
Tejun Heob82d4b12012-04-13 13:11:31 -0700714 blk_queue_congestion_threshold(q);
715
716 /* all done, end the initial bypass */
717 blk_queue_bypass_end(q);
718 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719}
Mike Snitzer51514122011-11-23 10:59:13 +0100720EXPORT_SYMBOL(blk_init_allocated_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721
Tejun Heo09ac46c2011-12-14 00:33:38 +0100722bool blk_get_queue(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723{
Tejun Heo34f60552011-12-14 00:33:37 +0100724 if (likely(!blk_queue_dead(q))) {
Tejun Heo09ac46c2011-12-14 00:33:38 +0100725 __blk_get_queue(q);
726 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 }
728
Tejun Heo09ac46c2011-12-14 00:33:38 +0100729 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700730}
Jens Axboed86e0e82011-05-27 07:44:43 +0200731EXPORT_SYMBOL(blk_get_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732
Tejun Heo5b788ce2012-06-04 20:40:59 -0700733static inline void blk_free_request(struct request_list *rl, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734{
Tejun Heof1f8cc92011-12-14 00:33:42 +0100735 if (rq->cmd_flags & REQ_ELVPRIV) {
Tejun Heo5b788ce2012-06-04 20:40:59 -0700736 elv_put_request(rl->q, rq);
Tejun Heof1f8cc92011-12-14 00:33:42 +0100737 if (rq->elv.icq)
Tejun Heo11a31222012-02-07 07:51:30 +0100738 put_io_context(rq->elv.icq->ioc);
Tejun Heof1f8cc92011-12-14 00:33:42 +0100739 }
740
Tejun Heo5b788ce2012-06-04 20:40:59 -0700741 mempool_free(rq, rl->rq_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742}
743
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744/*
745 * ioc_batching returns true if the ioc is a valid batching request and
746 * should be given priority access to a request.
747 */
Jens Axboe165125e2007-07-24 09:28:11 +0200748static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749{
750 if (!ioc)
751 return 0;
752
753 /*
754 * Make sure the process is able to allocate at least 1 request
755 * even if the batch times out, otherwise we could theoretically
756 * lose wakeups.
757 */
758 return ioc->nr_batch_requests == q->nr_batching ||
759 (ioc->nr_batch_requests > 0
760 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
761}
762
763/*
764 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
765 * will cause the process to be a "batcher" on all queues in the system. This
766 * is the behaviour we want though - once it gets a wakeup it should be given
767 * a nice run.
768 */
Jens Axboe165125e2007-07-24 09:28:11 +0200769static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770{
771 if (!ioc || ioc_batching(q, ioc))
772 return;
773
774 ioc->nr_batch_requests = q->nr_batching;
775 ioc->last_waited = jiffies;
776}
777
Tejun Heo5b788ce2012-06-04 20:40:59 -0700778static void __freed_request(struct request_list *rl, int sync)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779{
Tejun Heo5b788ce2012-06-04 20:40:59 -0700780 struct request_queue *q = rl->q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781
Tejun Heoa0516612012-06-26 15:05:44 -0700782 /*
783 * bdi isn't aware of blkcg yet. As all async IOs end up root
784 * blkcg anyway, just use root blkcg state.
785 */
786 if (rl == &q->root_rl &&
787 rl->count[sync] < queue_congestion_off_threshold(q))
Jens Axboe1faa16d2009-04-06 14:48:01 +0200788 blk_clear_queue_congested(q, sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789
Jens Axboe1faa16d2009-04-06 14:48:01 +0200790 if (rl->count[sync] + 1 <= q->nr_requests) {
791 if (waitqueue_active(&rl->wait[sync]))
792 wake_up(&rl->wait[sync]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793
Tejun Heo5b788ce2012-06-04 20:40:59 -0700794 blk_clear_rl_full(rl, sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 }
796}
797
798/*
799 * A request has just been released. Account for it, update the full and
800 * congestion status, wake up any waiters. Called under q->queue_lock.
801 */
Tejun Heo5b788ce2012-06-04 20:40:59 -0700802static void freed_request(struct request_list *rl, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803{
Tejun Heo5b788ce2012-06-04 20:40:59 -0700804 struct request_queue *q = rl->q;
Tejun Heo75eb6c32011-10-19 14:31:22 +0200805 int sync = rw_is_sync(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806
Tejun Heo8a5ecdd2012-06-04 20:40:58 -0700807 q->nr_rqs[sync]--;
Jens Axboe1faa16d2009-04-06 14:48:01 +0200808 rl->count[sync]--;
Tejun Heo75eb6c32011-10-19 14:31:22 +0200809 if (flags & REQ_ELVPRIV)
Tejun Heo8a5ecdd2012-06-04 20:40:58 -0700810 q->nr_rqs_elvpriv--;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811
Tejun Heo5b788ce2012-06-04 20:40:59 -0700812 __freed_request(rl, sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813
Jens Axboe1faa16d2009-04-06 14:48:01 +0200814 if (unlikely(rl->starved[sync ^ 1]))
Tejun Heo5b788ce2012-06-04 20:40:59 -0700815 __freed_request(rl, sync ^ 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816}
817
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818/*
Mike Snitzer9d5a4e92011-02-11 11:05:46 +0100819 * Determine if elevator data should be initialized when allocating the
820 * request associated with @bio.
821 */
822static bool blk_rq_should_init_elevator(struct bio *bio)
823{
824 if (!bio)
825 return true;
826
827 /*
828 * Flush requests do not use the elevator so skip initialization.
829 * This allows a request to share the flush and elevator data.
830 */
831 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
832 return false;
833
834 return true;
835}
836
Tejun Heoda8303c2011-10-19 14:33:05 +0200837/**
Tejun Heo852c7882012-03-05 13:15:27 -0800838 * rq_ioc - determine io_context for request allocation
839 * @bio: request being allocated is for this bio (can be %NULL)
840 *
841 * Determine io_context to use for request allocation for @bio. May return
842 * %NULL if %current->io_context doesn't exist.
843 */
844static struct io_context *rq_ioc(struct bio *bio)
845{
846#ifdef CONFIG_BLK_CGROUP
847 if (bio && bio->bi_ioc)
848 return bio->bi_ioc;
849#endif
850 return current->io_context;
851}
852
853/**
Tejun Heoa06e05e2012-06-04 20:40:55 -0700854 * __get_request - get a free request
Tejun Heo5b788ce2012-06-04 20:40:59 -0700855 * @rl: request list to allocate from
Tejun Heoda8303c2011-10-19 14:33:05 +0200856 * @rw_flags: RW and SYNC flags
857 * @bio: bio to allocate request for (can be %NULL)
858 * @gfp_mask: allocation mask
859 *
860 * Get a free request from @q. This function may fail under memory
861 * pressure or if @q is dead.
862 *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue;

	if (unlikely(blk_queue_dead(q)))
		return NULL;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					return NULL;
				}
			}
		}
		/*
		 * bdi isn't aware of blkcg yet.  As all async IOs end up
		 * root blkcg anyway, just use root blkcg state.
		 */
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return NULL;

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	/*
	 * Decide whether the new request will be managed by elevator.  If
	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed.  This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
	 *
	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	/* allocate and init request */
	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;

	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
	rq->cmd_flags = rw_flags | REQ_ALLOCED;

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
			if (!icq)
				goto fail_elvpriv;
		}

		rq->elv.icq = icq;
		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
			goto fail_elvpriv;

		/* @rq->elv.icq holds io_context until @rq is freed */
		if (icq)
			get_io_context(icq->ioc);
	}
out:
	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
	return rq;

fail_elvpriv:
	/*
	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
	 * and may fail indefinitely under memory pressure and thus
	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
	 * disturb iosched and blkcg but weird is better than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
			   dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;

	spin_lock_irq(q->queue_lock);
	q->nr_rqs_elvpriv--;
	spin_unlock_irq(q->queue_lock);
	goto out;

fail_alloc:
	/*
	 * Allocation failed presumably due to memory. Undo anything we
	 * might have messed up.
	 *
	 * Allocating task should really be put onto the front of the wait
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

	/*
	 * in the very unlikely event that allocation failed and no
	 * requests for this direction were pending, mark us starved so that
	 * freeing of a request in the other direction will notice
	 * us. another possible fix would be to split the rq mempool into
	 * READ and WRITE
	 */
rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return NULL;
}

/**
 * get_request - get a free request
 * @q: request_queue to allocate request from
 * @rw_flags: RW and SYNC flags
 * @bio: bio to allocate request for (can be %NULL)
 * @gfp_mask: allocation mask
 *
 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
 * function keeps retrying under memory pressure and fails iff @q is dead.
 *
 * Must be called with @q->queue_lock held and,
 * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (rq)
		return rq;

	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
		blk_put_rl(rl);
		return NULL;
	}

	/* wait on @rl and retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);

	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);
	io_schedule();

	/*
	 * After sleeping, we become a "batching" process and will be able
	 * to allocate at least one request, and up to a big batch of them
	 * for a small period of time.  See ioc_batching, ioc_set_batching
	 */
	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);

	goto retry;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	/* create ioc upfront */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);
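
/*
 * Illustrative sketch (not from this file) of a typical passthrough user:
 * allocate a request, fill in the command and run it synchronously.
 * Everything except the exported block layer helpers (blk_get_request,
 * blk_execute_rq, blk_put_request) is hypothetical here, including
 * cdb/cdb_len and disk.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */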

/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio:  The bio describing the memory mappings that will be submitted for IO.
 *        It may be a chained-bio properly constructed by block/bio layer.
 * @gfp_mask: gfp flags to be used for memory allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
 * the caller. It is passed a &struct bio, which describes the memory info of
 * the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers, and that bio_data_dir() will return
 * the needed direction of the request. (And all bio's in the passed bio-chain
 * are properly set accordingly.)
 *
 * If called under non-sleepable conditions, mapped bio buffers must not
1122 * need bouncing, by calling the appropriate masked or flagged allocator,
1123 * suitable for the target device. Otherwise the call to blk_queue_bounce will
1124 * BUG.
Jens Axboe53674ac2009-05-19 19:52:35 +02001125 *
1126 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1127 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
1128 * anything but the first bio in the chain. Otherwise you risk waiting for IO
1129 * completion of a bio that hasn't been submitted yet, thus resulting in a
1130 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
1131 * of bio_alloc(), as that avoids the mempool deadlock.
1132 * If possible a big IO should be split into smaller parts when allocation
1133 * fails. Partial allocation should not be an error, or you risk a live-lock.
Jens Axboedc72ef42006-07-20 14:54:05 +02001134 */
Boaz Harrosh79eb63e2009-05-17 18:57:15 +03001135struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1136 gfp_t gfp_mask)
Jens Axboedc72ef42006-07-20 14:54:05 +02001137{
Boaz Harrosh79eb63e2009-05-17 18:57:15 +03001138 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1139
1140 if (unlikely(!rq))
1141 return ERR_PTR(-ENOMEM);
1142
1143 for_each_bio(bio) {
1144 struct bio *bounce_bio = bio;
1145 int ret;
1146
1147 blk_queue_bounce(q, &bounce_bio);
1148 ret = blk_rq_append_bio(q, rq, bounce_bio);
1149 if (unlikely(ret)) {
1150 blk_put_request(rq);
1151 return ERR_PTR(ret);
1152 }
1153 }
1154
1155 return rq;
Jens Axboedc72ef42006-07-20 14:54:05 +02001156}
Boaz Harrosh79eb63e2009-05-17 18:57:15 +03001157EXPORT_SYMBOL(blk_make_request);
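/*
 * Illustrative sketch (not part of the original file): using
 * blk_make_request() to wrap a caller-built bio chain into a BLOCK_PC
 * request, as the kernel-doc above describes.  The bio chain is assumed to
 * already describe the data buffers and direction; finishing the request
 * with blk_execute_rq() is just one hypothetical way to run it.
 */
#if 0
static int example_submit_bio_chain(struct request_queue *q, struct bio *bio)
{
	struct request *rq;
	int err;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* further initialize the request before starting it */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return err;
}
#endif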
Jens Axboedc72ef42006-07-20 14:54:05 +02001158
1159/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 * blk_requeue_request - put a request back on queue
1161 * @q: request queue where request should be inserted
1162 * @rq: request to be inserted
1163 *
1164 * Description:
1165 * Drivers often keep queueing requests until the hardware cannot accept
1166 * more, when that condition happens we need to put the request back
1167 * on the queue. Must be called with queue lock held.
1168 */
Jens Axboe165125e2007-07-24 09:28:11 +02001169void blk_requeue_request(struct request_queue *q, struct request *rq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170{
Jens Axboe242f9dc2008-09-14 05:55:09 -07001171 blk_delete_timer(rq);
1172 blk_clear_rq_complete(rq);
Arnaldo Carvalho de Melo5f3ea372008-10-30 08:34:33 +01001173 trace_block_rq_requeue(q, rq);
Jens Axboe2056a782006-03-23 20:00:26 +01001174
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 if (blk_rq_tagged(rq))
1176 blk_queue_end_tag(q, rq);
1177
James Bottomleyba396a62009-05-27 14:17:08 +02001178 BUG_ON(blk_queued_rq(rq));
1179
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 elv_requeue_request(q, rq);
1181}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182EXPORT_SYMBOL(blk_requeue_request);
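/*
 * Illustrative sketch (not part of the original file): a request_fn backing
 * off when the hardware is full and putting the fetched request back with
 * blk_requeue_request(), as described above.  example_hw_busy() and
 * example_hw_queue() are hypothetical; request_fn runs with q->queue_lock
 * held, which is what blk_requeue_request() expects.
 */
#if 0
static bool example_hw_busy(void);
static void example_hw_queue(struct request *rq);

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (example_hw_busy()) {
			/* hardware can't take more; put it back and stop the queue */
			blk_requeue_request(q, rq);
			blk_stop_queue(q);
			break;
		}
		example_hw_queue(rq);
	}
}
#endif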
1183
Jens Axboe73c10102011-03-08 13:19:51 +01001184static void add_acct_request(struct request_queue *q, struct request *rq,
1185 int where)
1186{
1187 drive_stat_acct(rq, 1);
Jens Axboe7eaceac2011-03-10 08:52:07 +01001188 __elv_add_request(q, rq, where);
Jens Axboe73c10102011-03-08 13:19:51 +01001189}
1190
Tejun Heo074a7ac2008-08-25 19:56:14 +09001191static void part_round_stats_single(int cpu, struct hd_struct *part,
1192 unsigned long now)
1193{
1194 if (now == part->stamp)
1195 return;
1196
Nikanth Karthikesan316d3152009-10-06 20:16:55 +02001197 if (part_in_flight(part)) {
Tejun Heo074a7ac2008-08-25 19:56:14 +09001198 __part_stat_add(cpu, part, time_in_queue,
Nikanth Karthikesan316d3152009-10-06 20:16:55 +02001199 part_in_flight(part) * (now - part->stamp));
Tejun Heo074a7ac2008-08-25 19:56:14 +09001200 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1201 }
1202 part->stamp = now;
1203}
1204
1205/**
Randy Dunlap496aa8a2008-10-16 07:46:23 +02001206 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1207 * @cpu: cpu number for stats access
1208 * @part: target partition
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 *
1210 * The average IO queue length and utilisation statistics are maintained
1211 * by observing the current state of the queue length and the amount of
1212 * time it has been in this state for.
1213 *
1214 * Normally, that accounting is done on IO completion, but that can result
1215 * in more than a second's worth of IO being accounted for within any one
1216 * second, leading to >100% utilisation. To deal with that, we call this
1217 * function to do a round-off before returning the results when reading
1218 * /proc/diskstats. This accounts immediately for all queue usage up to
1219 * the current jiffies and restarts the counters again.
1220 */
Tejun Heoc9959052008-08-25 19:47:21 +09001221void part_round_stats(int cpu, struct hd_struct *part)
Jerome Marchand6f2576a2008-02-08 11:04:35 +01001222{
1223 unsigned long now = jiffies;
1224
Tejun Heo074a7ac2008-08-25 19:56:14 +09001225 if (part->partno)
1226 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1227 part_round_stats_single(cpu, part, now);
Jerome Marchand6f2576a2008-02-08 11:04:35 +01001228}
Tejun Heo074a7ac2008-08-25 19:56:14 +09001229EXPORT_SYMBOL_GPL(part_round_stats);
Jerome Marchand6f2576a2008-02-08 11:04:35 +01001230
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231/*
1232 * queue lock must be held
1233 */
Jens Axboe165125e2007-07-24 09:28:11 +02001234void __blk_put_request(struct request_queue *q, struct request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 if (unlikely(!q))
1237 return;
1238 if (unlikely(--req->ref_count))
1239 return;
1240
Tejun Heo8922e162005-10-20 16:23:44 +02001241 elv_completed_request(q, req);
1242
Boaz Harrosh1cd96c22009-03-24 12:35:07 +01001243 /* this is a bio leak */
1244 WARN_ON(req->bio != NULL);
1245
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 /*
1247 * Request may not have originated from ll_rw_blk. If not,
1248 * it didn't come out of our reserved rq pools
1249 */
Jens Axboe49171e52006-08-10 08:59:11 +02001250 if (req->cmd_flags & REQ_ALLOCED) {
Tejun Heo75eb6c32011-10-19 14:31:22 +02001251 unsigned int flags = req->cmd_flags;
Tejun Heoa0516612012-06-26 15:05:44 -07001252 struct request_list *rl = blk_rq_rl(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 BUG_ON(!list_empty(&req->queuelist));
Jens Axboe98170642006-07-28 09:23:08 +02001255 BUG_ON(!hlist_unhashed(&req->hash));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
Tejun Heoa0516612012-06-26 15:05:44 -07001257 blk_free_request(rl, req);
1258 freed_request(rl, flags);
1259 blk_put_rl(rl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 }
1261}
Mike Christie6e39b69e2005-11-11 05:30:24 -06001262EXPORT_SYMBOL_GPL(__blk_put_request);
1263
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264void blk_put_request(struct request *req)
1265{
Tejun Heo8922e162005-10-20 16:23:44 +02001266 unsigned long flags;
Jens Axboe165125e2007-07-24 09:28:11 +02001267 struct request_queue *q = req->q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
FUJITA Tomonori52a93ba2008-07-15 21:21:45 +02001269 spin_lock_irqsave(q->queue_lock, flags);
1270 __blk_put_request(q, req);
1271 spin_unlock_irqrestore(q->queue_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273EXPORT_SYMBOL(blk_put_request);
1274
Christoph Hellwig66ac0282010-06-18 16:59:42 +02001275/**
1276 * blk_add_request_payload - add a payload to a request
1277 * @rq: request to update
1278 * @page: page backing the payload
1279 * @len: length of the payload.
1280 *
1281 * This allows a block driver to later add a payload to an already
1282 * submitted request. The driver needs to take care of freeing the payload
1283 * itself.
1284 *
1285 * Note that this is a quite horrible hack and nothing but handling of
1286 * discard requests should ever use it.
1287 */
1288void blk_add_request_payload(struct request *rq, struct page *page,
1289 unsigned int len)
1290{
1291 struct bio *bio = rq->bio;
1292
1293 bio->bi_io_vec->bv_page = page;
1294 bio->bi_io_vec->bv_offset = 0;
1295 bio->bi_io_vec->bv_len = len;
1296
1297 bio->bi_size = len;
1298 bio->bi_vcnt = 1;
1299 bio->bi_phys_segments = 1;
1300
1301 rq->__data_len = rq->resid_len = len;
1302 rq->nr_phys_segments = 1;
1303 rq->buffer = bio_data(bio);
1304}
1305EXPORT_SYMBOL_GPL(blk_add_request_payload);
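/*
 * Illustrative sketch (not part of the original file): a prep_rq_fn
 * attaching a driver-built payload page to a discard request with
 * blk_add_request_payload(), the only intended use noted above.  This
 * mirrors how SCSI disk builds UNMAP payloads; example_fill_unmap_page()
 * and the 512-byte payload length are hypothetical, and the driver remains
 * responsible for freeing the page on completion.
 */
#if 0
static struct page *example_fill_unmap_page(struct request *rq);

static int example_prep_fn(struct request_queue *q, struct request *rq)
{
	struct page *page;

	if (!(rq->cmd_flags & REQ_DISCARD))
		return BLKPREP_OK;

	page = example_fill_unmap_page(rq);
	if (!page)
		return BLKPREP_DEFER;	/* retry later, request stays queued */

	blk_add_request_payload(rq, page, 512);
	return BLKPREP_OK;
}
#endif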
1306
Jens Axboe73c10102011-03-08 13:19:51 +01001307static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1308 struct bio *bio)
1309{
1310 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1311
Jens Axboe73c10102011-03-08 13:19:51 +01001312 if (!ll_back_merge_fn(q, req, bio))
1313 return false;
1314
1315 trace_block_bio_backmerge(q, bio);
1316
1317 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1318 blk_rq_set_mixed_merge(req);
1319
1320 req->biotail->bi_next = bio;
1321 req->biotail = bio;
1322 req->__data_len += bio->bi_size;
1323 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1324
1325 drive_stat_acct(req, 0);
1326 return true;
1327}
1328
1329static bool bio_attempt_front_merge(struct request_queue *q,
1330 struct request *req, struct bio *bio)
1331{
1332 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
Jens Axboe73c10102011-03-08 13:19:51 +01001333
Jens Axboe73c10102011-03-08 13:19:51 +01001334 if (!ll_front_merge_fn(q, req, bio))
1335 return false;
1336
1337 trace_block_bio_frontmerge(q, bio);
1338
1339 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1340 blk_rq_set_mixed_merge(req);
1341
Jens Axboe73c10102011-03-08 13:19:51 +01001342 bio->bi_next = req->bio;
1343 req->bio = bio;
1344
1345 /*
1346 * req->buffer may not be valid. If the low level driver said
1347 * it didn't need a bounce buffer then it had better
1348 * not touch req->buffer either...
1349 */
1350 req->buffer = bio_data(bio);
1351 req->__sector = bio->bi_sector;
1352 req->__data_len += bio->bi_size;
1353 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1354
1355 drive_stat_acct(req, 0);
1356 return true;
1357}
1358
Tejun Heobd87b582011-10-19 14:33:08 +02001359/**
1360 * attempt_plug_merge - try to merge with %current's plugged list
1361 * @q: request_queue new bio is being queued at
1362 * @bio: new bio being queued
1363 * @request_count: out parameter for number of traversed plugged requests
1364 *
1365 * Determine whether @bio being queued on @q can be merged with a request
1366 * on %current's plugged list. Returns %true if merge was successful,
1367 * otherwise %false.
1368 *
Tejun Heo07c2bd32012-02-08 09:19:42 +01001369 * Plugging coalesces IOs from the same issuer for the same purpose without
1370 * going through @q->queue_lock. As such it's more of an issuing mechanism
1371 * than scheduling, and the request, while it may have elvpriv data, is not
1372 * added to the elevator at this point. In addition, we don't have
1373 * reliable access to the elevator outside queue lock. Only check basic
1374 * merging parameters without querying the elevator.
Jens Axboe73c10102011-03-08 13:19:51 +01001375 */
Tejun Heobd87b582011-10-19 14:33:08 +02001376static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1377 unsigned int *request_count)
Jens Axboe73c10102011-03-08 13:19:51 +01001378{
1379 struct blk_plug *plug;
1380 struct request *rq;
1381 bool ret = false;
1382
Tejun Heobd87b582011-10-19 14:33:08 +02001383 plug = current->plug;
Jens Axboe73c10102011-03-08 13:19:51 +01001384 if (!plug)
1385 goto out;
Shaohua Li56ebdaf2011-08-24 16:04:34 +02001386 *request_count = 0;
Jens Axboe73c10102011-03-08 13:19:51 +01001387
1388 list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1389 int el_ret;
1390
Shaohua Li1b2e19f2012-04-06 11:37:47 -06001391 if (rq->q == q)
1392 (*request_count)++;
Shaohua Li56ebdaf2011-08-24 16:04:34 +02001393
Tejun Heo07c2bd32012-02-08 09:19:42 +01001394 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
Jens Axboe73c10102011-03-08 13:19:51 +01001395 continue;
1396
Tejun Heo050c8ea2012-02-08 09:19:38 +01001397 el_ret = blk_try_merge(rq, bio);
Jens Axboe73c10102011-03-08 13:19:51 +01001398 if (el_ret == ELEVATOR_BACK_MERGE) {
1399 ret = bio_attempt_back_merge(q, rq, bio);
1400 if (ret)
1401 break;
1402 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1403 ret = bio_attempt_front_merge(q, rq, bio);
1404 if (ret)
1405 break;
1406 }
1407 }
1408out:
1409 return ret;
1410}
1411
Jens Axboe86db1e22008-01-29 14:53:40 +01001412void init_request_from_bio(struct request *req, struct bio *bio)
Tejun Heo52d9e672006-01-06 09:49:58 +01001413{
Jens Axboe4aff5e22006-08-10 08:44:47 +02001414 req->cmd_type = REQ_TYPE_FS;
Tejun Heo52d9e672006-01-06 09:49:58 +01001415
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001416 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1417 if (bio->bi_rw & REQ_RAHEAD)
Tejun Heoa82afdf2009-07-03 17:48:16 +09001418 req->cmd_flags |= REQ_FAILFAST_MASK;
Jens Axboeb31dc662006-06-13 08:26:10 +02001419
Tejun Heo52d9e672006-01-06 09:49:58 +01001420 req->errors = 0;
Tejun Heoa2dec7b2009-05-07 22:24:44 +09001421 req->__sector = bio->bi_sector;
Tejun Heo52d9e672006-01-06 09:49:58 +01001422 req->ioprio = bio_prio(bio);
NeilBrownbc1c56f2007-08-16 13:31:30 +02001423 blk_rq_bio_prep(req->q, req, bio);
Tejun Heo52d9e672006-01-06 09:49:58 +01001424}
1425
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001426void blk_queue_bio(struct request_queue *q, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427{
Jiri Slaby5e00d1b2010-08-12 14:31:06 +02001428 const bool sync = !!(bio->bi_rw & REQ_SYNC);
Jens Axboe73c10102011-03-08 13:19:51 +01001429 struct blk_plug *plug;
1430 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1431 struct request *req;
Shaohua Li56ebdaf2011-08-24 16:04:34 +02001432 unsigned int request_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 /*
1435 * low level driver can indicate that it wants pages above a
1436 * certain limit bounced to low memory (ie for highmem, or even
1437 * ISA dma in theory)
1438 */
1439 blk_queue_bounce(q, &bio);
1440
Tejun Heo4fed9472010-09-03 11:56:17 +02001441 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
Jens Axboe73c10102011-03-08 13:19:51 +01001442 spin_lock_irq(q->queue_lock);
Tejun Heoae1b1532011-01-25 12:43:54 +01001443 where = ELEVATOR_INSERT_FLUSH;
Tejun Heo28e7d182010-09-03 11:56:16 +02001444 goto get_rq;
1445 }
1446
Jens Axboe73c10102011-03-08 13:19:51 +01001447 /*
1448 * Check if we can merge with the plugged list before grabbing
1449 * any locks.
1450 */
Tejun Heobd87b582011-10-19 14:33:08 +02001451 if (attempt_plug_merge(q, bio, &request_count))
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001452 return;
Jens Axboe73c10102011-03-08 13:19:51 +01001453
1454 spin_lock_irq(q->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
1456 el_ret = elv_merge(q, &req, bio);
Jens Axboe73c10102011-03-08 13:19:51 +01001457 if (el_ret == ELEVATOR_BACK_MERGE) {
Jens Axboe73c10102011-03-08 13:19:51 +01001458 if (bio_attempt_back_merge(q, req, bio)) {
Tejun Heo07c2bd32012-02-08 09:19:42 +01001459 elv_bio_merged(q, req, bio);
Jens Axboe73c10102011-03-08 13:19:51 +01001460 if (!attempt_back_merge(q, req))
1461 elv_merged_request(q, req, el_ret);
1462 goto out_unlock;
Tejun Heo80a761f2009-07-03 17:48:17 +09001463 }
Jens Axboe73c10102011-03-08 13:19:51 +01001464 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
Jens Axboe73c10102011-03-08 13:19:51 +01001465 if (bio_attempt_front_merge(q, req, bio)) {
Tejun Heo07c2bd32012-02-08 09:19:42 +01001466 elv_bio_merged(q, req, bio);
Jens Axboe73c10102011-03-08 13:19:51 +01001467 if (!attempt_front_merge(q, req))
1468 elv_merged_request(q, req, el_ret);
1469 goto out_unlock;
1470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 }
1472
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473get_rq:
Nick Piggin450991b2005-06-28 20:45:13 -07001474 /*
Jens Axboe7749a8d2006-12-13 13:02:26 +01001475 * This sync check and mask will be re-done in init_request_from_bio(),
1476 * but we need to set it earlier to expose the sync flag to the
1477 * rq allocator and io schedulers.
1478 */
1479 rw_flags = bio_data_dir(bio);
1480 if (sync)
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001481 rw_flags |= REQ_SYNC;
Jens Axboe7749a8d2006-12-13 13:02:26 +01001482
1483 /*
Nick Piggin450991b2005-06-28 20:45:13 -07001484 * Grab a free request. This might sleep but cannot fail.
Nick Piggind6344532005-06-28 20:45:14 -07001485 * Returns with the queue unlocked.
Nick Piggin450991b2005-06-28 20:45:13 -07001486 */
Tejun Heoa06e05e2012-06-04 20:40:55 -07001487 req = get_request(q, rw_flags, bio, GFP_NOIO);
Tejun Heoda8303c2011-10-19 14:33:05 +02001488 if (unlikely(!req)) {
1489 bio_endio(bio, -ENODEV); /* @q is dead */
1490 goto out_unlock;
1491 }
Nick Piggind6344532005-06-28 20:45:14 -07001492
Nick Piggin450991b2005-06-28 20:45:13 -07001493 /*
1494 * After dropping the lock and possibly sleeping here, our request
1495 * may now be mergeable after it had proven unmergeable (above).
1496 * We don't worry about that case for efficiency. It won't happen
1497 * often, and the elevators are able to handle it.
1498 */
Tejun Heo52d9e672006-01-06 09:49:58 +01001499 init_request_from_bio(req, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Tao Ma9562ad92011-10-24 16:11:30 +02001501 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
Jens Axboe11ccf112011-07-26 15:01:15 +02001502 req->cpu = raw_smp_processor_id();
Tejun Heodd831002010-09-03 11:56:16 +02001503
Jens Axboe73c10102011-03-08 13:19:51 +01001504 plug = current->plug;
Jens Axboe721a9602011-03-09 11:56:30 +01001505 if (plug) {
Jens Axboedc6d36c2011-04-12 10:28:28 +02001506 /*
1507 * If this is the first request added after a plug, fire
1508 * off a plug trace. If others have been added before, check
1509 * if we have multiple devices in this plug. If so, make a
1510 * note to sort the list before dispatch.
1511 */
1512 if (list_empty(&plug->list))
1513 trace_block_plug(q);
Shaohua Li3540d5e2011-11-16 09:21:50 +01001514 else {
1515 if (!plug->should_sort) {
1516 struct request *__rq;
Jens Axboe73c10102011-03-08 13:19:51 +01001517
Shaohua Li3540d5e2011-11-16 09:21:50 +01001518 __rq = list_entry_rq(plug->list.prev);
1519 if (__rq->q != q)
1520 plug->should_sort = 1;
1521 }
Shaohua Li019ceb72011-11-16 09:21:50 +01001522 if (request_count >= BLK_MAX_REQUEST_COUNT) {
Shaohua Li3540d5e2011-11-16 09:21:50 +01001523 blk_flush_plug_list(plug, false);
Shaohua Li019ceb72011-11-16 09:21:50 +01001524 trace_block_plug(q);
1525 }
Jens Axboe73c10102011-03-08 13:19:51 +01001526 }
Shaohua Lia6327162011-08-24 16:04:32 +02001527 list_add_tail(&req->queuelist, &plug->list);
1528 drive_stat_acct(req, 1);
Jens Axboe73c10102011-03-08 13:19:51 +01001529 } else {
1530 spin_lock_irq(q->queue_lock);
1531 add_acct_request(q, req, where);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02001532 __blk_run_queue(q);
Jens Axboe73c10102011-03-08 13:19:51 +01001533out_unlock:
1534 spin_unlock_irq(q->queue_lock);
1535 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536}
Jens Axboec20e8de2011-09-12 12:03:37 +02001537EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538
1539/*
1540 * If bio->bi_bdev is a partition, remap the location
1541 */
1542static inline void blk_partition_remap(struct bio *bio)
1543{
1544 struct block_device *bdev = bio->bi_bdev;
1545
Jens Axboebf2de6f2007-09-27 13:01:25 +02001546 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 struct hd_struct *p = bdev->bd_part;
Jens Axboea3623572005-11-01 09:26:16 +01001548
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 bio->bi_sector += p->start_sect;
1550 bio->bi_bdev = bdev->bd_contains;
Alan D. Brunellec7149d62007-08-07 15:30:23 +02001551
Mike Snitzerd07335e2010-11-16 12:52:38 +01001552 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1553 bdev->bd_dev,
1554 bio->bi_sector - p->start_sect);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 }
1556}
1557
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558static void handle_bad_sector(struct bio *bio)
1559{
1560 char b[BDEVNAME_SIZE];
1561
1562 printk(KERN_INFO "attempt to access beyond end of device\n");
1563 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1564 bdevname(bio->bi_bdev, b),
1565 bio->bi_rw,
1566 (unsigned long long)bio->bi_sector + bio_sectors(bio),
Mike Snitzer77304d22010-11-08 14:39:12 +01001567 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
1569 set_bit(BIO_EOF, &bio->bi_flags);
1570}
1571
Akinobu Mitac17bb492006-12-08 02:39:46 -08001572#ifdef CONFIG_FAIL_MAKE_REQUEST
1573
1574static DECLARE_FAULT_ATTR(fail_make_request);
1575
1576static int __init setup_fail_make_request(char *str)
1577{
1578 return setup_fault_attr(&fail_make_request, str);
1579}
1580__setup("fail_make_request=", setup_fail_make_request);
1581
Akinobu Mitab2c9cd32011-07-26 16:09:03 -07001582static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
Akinobu Mitac17bb492006-12-08 02:39:46 -08001583{
Akinobu Mitab2c9cd32011-07-26 16:09:03 -07001584 return part->make_it_fail && should_fail(&fail_make_request, bytes);
Akinobu Mitac17bb492006-12-08 02:39:46 -08001585}
1586
1587static int __init fail_make_request_debugfs(void)
1588{
Akinobu Mitadd48c082011-08-03 16:21:01 -07001589 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1590 NULL, &fail_make_request);
1591
1592 return IS_ERR(dir) ? PTR_ERR(dir) : 0;
Akinobu Mitac17bb492006-12-08 02:39:46 -08001593}
1594
1595late_initcall(fail_make_request_debugfs);
1596
1597#else /* CONFIG_FAIL_MAKE_REQUEST */
1598
Akinobu Mitab2c9cd32011-07-26 16:09:03 -07001599static inline bool should_fail_request(struct hd_struct *part,
1600 unsigned int bytes)
Akinobu Mitac17bb492006-12-08 02:39:46 -08001601{
Akinobu Mitab2c9cd32011-07-26 16:09:03 -07001602 return false;
Akinobu Mitac17bb492006-12-08 02:39:46 -08001603}
1604
1605#endif /* CONFIG_FAIL_MAKE_REQUEST */
1606
Jens Axboec07e2b42007-07-18 13:27:58 +02001607/*
1608 * Check whether this bio extends beyond the end of the device.
1609 */
1610static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1611{
1612 sector_t maxsector;
1613
1614 if (!nr_sectors)
1615 return 0;
1616
1617 /* Test device or partition size, when known. */
Mike Snitzer77304d22010-11-08 14:39:12 +01001618 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
Jens Axboec07e2b42007-07-18 13:27:58 +02001619 if (maxsector) {
1620 sector_t sector = bio->bi_sector;
1621
1622 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1623 /*
1624 * This may well happen - the kernel calls bread()
1625 * without checking the size of the device, e.g., when
1626 * mounting a device.
1627 */
1628 handle_bad_sector(bio);
1629 return 1;
1630 }
1631 }
1632
1633 return 0;
1634}
1635
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001636static noinline_for_stack bool
1637generic_make_request_checks(struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638{
Jens Axboe165125e2007-07-24 09:28:11 +02001639 struct request_queue *q;
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001640 int nr_sectors = bio_sectors(bio);
Jens Axboe51fd77b2007-11-02 08:49:08 +01001641 int err = -EIO;
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001642 char b[BDEVNAME_SIZE];
1643 struct hd_struct *part;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
1645 might_sleep();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
Jens Axboec07e2b42007-07-18 13:27:58 +02001647 if (bio_check_eod(bio, nr_sectors))
1648 goto end_io;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001650 q = bdev_get_queue(bio->bi_bdev);
1651 if (unlikely(!q)) {
1652 printk(KERN_ERR
1653 "generic_make_request: Trying to access "
1654 "nonexistent block-device %s (%Lu)\n",
1655 bdevname(bio->bi_bdev, b),
1656 (long long) bio->bi_sector);
1657 goto end_io;
1658 }
1659
1660 if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
1661 nr_sectors > queue_max_hw_sectors(q))) {
1662 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1663 bdevname(bio->bi_bdev, b),
1664 bio_sectors(bio),
1665 queue_max_hw_sectors(q));
1666 goto end_io;
1667 }
1668
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001669 part = bio->bi_bdev->bd_part;
1670 if (should_fail_request(part, bio->bi_size) ||
1671 should_fail_request(&part_to_disk(part)->part0,
1672 bio->bi_size))
1673 goto end_io;
1674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 /*
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001676 * If this device has partitions, remap block n
1677 * of partition p to block n+start(p) of the disk.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 */
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001679 blk_partition_remap(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001681 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1682 goto end_io;
1683
1684 if (bio_check_eod(bio, nr_sectors))
1685 goto end_io;
1686
1687 /*
1688 * Filter flush bio's early so that make_request based
1689 * drivers without flush support don't have to worry
1690 * about them.
1691 */
1692 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1693 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1694 if (!nr_sectors) {
1695 err = 0;
Tejun Heoa7384672008-11-28 13:32:03 +09001696 goto end_io;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 }
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001700 if ((bio->bi_rw & REQ_DISCARD) &&
1701 (!blk_queue_discard(q) ||
1702 ((bio->bi_rw & REQ_SECURE) &&
1703 !blk_queue_secdiscard(q)))) {
1704 err = -EOPNOTSUPP;
1705 goto end_io;
1706 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
Tejun Heo7f4b35d2012-06-04 20:40:56 -07001708 /*
1709 * Various block parts want %current->io_context and lazy ioc
1710 * allocation ends up trading a lot of pain for a small amount of
1711 * memory. Just allocate it upfront. This may fail and block
1712 * layer knows how to live with it.
1713 */
1714 create_io_context(GFP_ATOMIC, q->node);
1715
Tejun Heobc16a4f2011-10-19 14:33:01 +02001716 if (blk_throtl_bio(q, bio))
1717 return false; /* throttled, will be resubmitted later */
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001718
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001719 trace_block_bio_queue(q, bio);
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001720 return true;
Tejun Heoa7384672008-11-28 13:32:03 +09001721
1722end_io:
1723 bio_endio(bio, err);
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001724 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725}
1726
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001727/**
1728 * generic_make_request - hand a buffer to its device driver for I/O
1729 * @bio: The bio describing the location in memory and on the device.
1730 *
1731 * generic_make_request() is used to make I/O requests of block
1732 * devices. It is passed a &struct bio, which describes the I/O that needs
1733 * to be done.
1734 *
1735 * generic_make_request() does not return any status. The
1736 * success/failure status of the request, along with notification of
1737 * completion, is delivered asynchronously through the bio->bi_end_io
1738 * function described (one day) elsewhere.
1739 *
1740 * The caller of generic_make_request must make sure that bi_io_vec
1741 * are set to describe the memory buffer, and that bi_dev and bi_sector are
1742 * set to describe the device address, and the
1743 * bi_end_io and optionally bi_private are set to describe how
1744 * completion notification should be signaled.
1745 *
1746 * generic_make_request and the drivers it calls may use bi_next if this
1747 * bio happens to be merged with someone else, and may resubmit the bio to
1748 * a lower device by calling into generic_make_request recursively, which
1749 * means the bio should NOT be touched after the call to ->make_request_fn.
Neil Brownd89d8792007-05-01 09:53:42 +02001750 */
1751void generic_make_request(struct bio *bio)
1752{
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001753 struct bio_list bio_list_on_stack;
1754
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001755 if (!generic_make_request_checks(bio))
1756 return;
1757
1758 /*
1759 * We only want one ->make_request_fn to be active at a time, else
1760 * stack usage with stacked devices could be a problem. So use
1761 * current->bio_list to keep a list of requests submitted by a
1762 * make_request_fn function. current->bio_list is also used as a
1763 * flag to say if generic_make_request is currently active in this
1764 * task or not. If it is NULL, then no make_request is active. If
1765 * it is non-NULL, then a make_request is active, and new requests
1766 * should be added at the tail
1767 */
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001768 if (current->bio_list) {
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001769 bio_list_add(current->bio_list, bio);
Neil Brownd89d8792007-05-01 09:53:42 +02001770 return;
1771 }
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001772
Neil Brownd89d8792007-05-01 09:53:42 +02001773 /* following loop may be a bit non-obvious, and so deserves some
1774 * explanation.
1775 * Before entering the loop, bio->bi_next is NULL (as all callers
1776 * ensure that) so we have a list with a single bio.
1777 * We pretend that we have just taken it off a longer list, so
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001778 * we assign bio_list to a pointer to the bio_list_on_stack,
1779 * thus initialising the bio_list of new bios to be
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001780 * added. ->make_request() may indeed add some more bios
Neil Brownd89d8792007-05-01 09:53:42 +02001781 * through a recursive call to generic_make_request. If it
1782 * did, we find a non-NULL value in bio_list and re-enter the loop
1783 * from the top. In this case we really did just take the bio
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001784 * off the top of the list (no pretending) and so remove it from
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001785 * bio_list, and call into ->make_request() again.
Neil Brownd89d8792007-05-01 09:53:42 +02001786 */
1787 BUG_ON(bio->bi_next);
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001788 bio_list_init(&bio_list_on_stack);
1789 current->bio_list = &bio_list_on_stack;
Neil Brownd89d8792007-05-01 09:53:42 +02001790 do {
Christoph Hellwig27a84d52011-09-15 14:01:40 +02001791 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1792
1793 q->make_request_fn(q, bio);
1794
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001795 bio = bio_list_pop(current->bio_list);
Neil Brownd89d8792007-05-01 09:53:42 +02001796 } while (bio);
Akinobu Mitabddd87c2010-02-23 08:55:42 +01001797 current->bio_list = NULL; /* deactivate */
Neil Brownd89d8792007-05-01 09:53:42 +02001798}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799EXPORT_SYMBOL(generic_make_request);
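/*
 * Illustrative sketch (not part of the original file): a bio-based stacking
 * driver's make_request_fn remapping an incoming bio to its backing device
 * and resubmitting it with generic_make_request(), relying on the
 * current->bio_list handling above to keep the recursion flat.  The
 * example_dev structure and its use of q->queuedata are hypothetical.
 */
#if 0
struct example_dev {
	struct block_device	*backing_bdev;
	sector_t		data_offset;
};

static void example_make_request(struct request_queue *q, struct bio *bio)
{
	struct example_dev *ed = q->queuedata;

	/* redirect the bio to the backing device and resubmit it */
	bio->bi_bdev = ed->backing_bdev;
	bio->bi_sector += ed->data_offset;
	generic_make_request(bio);
}
#endif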
1800
1801/**
Randy Dunlap710027a2008-08-19 20:13:11 +02001802 * submit_bio - submit a bio to the block device layer for I/O
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1804 * @bio: The &struct bio which describes the I/O
1805 *
1806 * submit_bio() is very similar in purpose to generic_make_request(), and
1807 * uses that function to do most of the work. Both are fairly rough
Randy Dunlap710027a2008-08-19 20:13:11 +02001808 * interfaces; @bio must be set up and ready for I/O.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 *
1810 */
1811void submit_bio(int rw, struct bio *bio)
1812{
1813 int count = bio_sectors(bio);
1814
Jens Axboe22e2c502005-06-27 10:55:12 +02001815 bio->bi_rw |= rw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Jens Axboebf2de6f2007-09-27 13:01:25 +02001817 /*
1818 * If it's a regular read/write or a barrier with data attached,
1819 * go through the normal accounting stuff before submission.
1820 */
Jens Axboe3ffb52e2010-06-29 13:33:38 +02001821 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
Jens Axboebf2de6f2007-09-27 13:01:25 +02001822 if (rw & WRITE) {
1823 count_vm_events(PGPGOUT, count);
1824 } else {
1825 task_io_account_read(bio->bi_size);
1826 count_vm_events(PGPGIN, count);
1827 }
1828
1829 if (unlikely(block_dump)) {
1830 char b[BDEVNAME_SIZE];
San Mehat8dcbdc72010-09-14 08:48:01 +02001831 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001832 current->comm, task_pid_nr(current),
Jens Axboebf2de6f2007-09-27 13:01:25 +02001833 (rw & WRITE) ? "WRITE" : "READ",
1834 (unsigned long long)bio->bi_sector,
San Mehat8dcbdc72010-09-14 08:48:01 +02001835 bdevname(bio->bi_bdev, b),
1836 count);
Jens Axboebf2de6f2007-09-27 13:01:25 +02001837 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 }
1839
1840 generic_make_request(bio);
1841}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842EXPORT_SYMBOL(submit_bio);
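/*
 * Illustrative sketch (not part of the original file): reading one page
 * synchronously through submit_bio().  example_end_io() and the on-stack
 * completion plumbing are hypothetical; most real callers go through
 * higher-level helpers instead of open-coding this.
 */
#if 0
static void example_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);
}

static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;
	int err;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;
	bio->bi_private = &done;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_for_completion(&done);

	err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
	bio_put(bio);
	return err;
}
#endif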
1843
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05001844/**
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001845 * blk_rq_check_limits - Helper function to check a request for the queue limit
1846 * @q: the queue
1847 * @rq: the request being checked
1848 *
1849 * Description:
1850 * @rq may have been made based on weaker limitations of upper-level queues
1851 * in request stacking drivers, and it may violate the limitation of @q.
1852 * Since the block layer and the underlying device driver trust @rq
1853 * after it is inserted to @q, it should be checked against @q before
1854 * the insertion using this generic function.
1855 *
1856 * This function should also be useful for request stacking drivers
Stefan Weileef35c22010-08-06 21:11:15 +02001857 * in some cases below, so export this function.
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001858 * Request stacking drivers like request-based dm may change the queue
1859 * limits while requests are in the queue (e.g. dm's table swapping).
1860 * Such request stacking drivers should check those requests against
1861 * the new queue limits again when they dispatch those requests,
1862 * although such checks are also done against the old queue limits
1863 * when submitting requests.
1864 */
1865int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1866{
Mike Snitzer33839772010-08-08 12:11:33 -04001867 if (rq->cmd_flags & REQ_DISCARD)
1868 return 0;
1869
Martin K. Petersenae03bf62009-05-22 17:17:50 -04001870 if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1871 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001872 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1873 return -EIO;
1874 }
1875
1876 /*
1877 * queue's settings related to segment counting like q->bounce_pfn
1878 * may differ from that of other stacking queues.
1879 * Recalculate it to check the request correctly on this queue's
1880 * limitation.
1881 */
1882 blk_recalc_rq_segments(rq);
Martin K. Petersen8a783622010-02-26 00:20:39 -05001883 if (rq->nr_phys_segments > queue_max_segments(q)) {
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001884 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1885 return -EIO;
1886 }
1887
1888 return 0;
1889}
1890EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1891
1892/**
1893 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1894 * @q: the queue to submit the request
1895 * @rq: the request being queued
1896 */
1897int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1898{
1899 unsigned long flags;
Jeff Moyer4853aba2011-08-15 21:37:25 +02001900 int where = ELEVATOR_INSERT_BACK;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001901
1902 if (blk_rq_check_limits(q, rq))
1903 return -EIO;
1904
Akinobu Mitab2c9cd32011-07-26 16:09:03 -07001905 if (rq->rq_disk &&
1906 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001907 return -EIO;
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001908
1909 spin_lock_irqsave(q->queue_lock, flags);
Tejun Heo8ba61432011-12-14 00:33:37 +01001910 if (unlikely(blk_queue_dead(q))) {
1911 spin_unlock_irqrestore(q->queue_lock, flags);
1912 return -ENODEV;
1913 }
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001914
1915 /*
1916 * Submitting request must be dequeued before calling this function
1917 * because it will be linked to another request_queue
1918 */
1919 BUG_ON(blk_queued_rq(rq));
1920
Jeff Moyer4853aba2011-08-15 21:37:25 +02001921 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1922 where = ELEVATOR_INSERT_FLUSH;
1923
1924 add_acct_request(q, rq, where);
Jeff Moyere67b77c2011-10-17 12:57:23 +02001925 if (where == ELEVATOR_INSERT_FLUSH)
1926 __blk_run_queue(q);
Kiyoshi Ueda82124d62008-09-18 10:45:38 -04001927 spin_unlock_irqrestore(q->queue_lock, flags);
1928
1929 return 0;
1930}
1931EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
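/*
 * Illustrative sketch (not part of the original file): the request-stacking
 * path (e.g. request-based dm) handing an already prepared clone of the
 * original request to the bottom device.  example_clone_request() and
 * example_free_clone() are hypothetical; the limit re-check happens inside
 * blk_insert_cloned_request() via blk_rq_check_limits().
 */
#if 0
static struct request *example_clone_request(struct request *orig);
static void example_free_clone(struct request *clone);

static int example_dispatch_clone(struct request_queue *bottom_q,
				  struct request *orig)
{
	struct request *clone;
	int ret;

	clone = example_clone_request(orig);
	if (!clone)
		return -ENOMEM;

	ret = blk_insert_cloned_request(bottom_q, clone);
	if (ret)
		example_free_clone(clone);
	return ret;
}
#endif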
1932
Tejun Heo80a761f2009-07-03 17:48:17 +09001933/**
1934 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1935 * @rq: request to examine
1936 *
1937 * Description:
1938 * A request could be merge of IOs which require different failure
1939 * handling. This function determines the number of bytes which
1940 * can be failed from the beginning of the request without
1941 * crossing into area which need to be retried further.
1942 *
1943 * Return:
1944 * The number of bytes to fail.
1945 *
1946 * Context:
1947 * queue_lock must be held.
1948 */
1949unsigned int blk_rq_err_bytes(const struct request *rq)
1950{
1951 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1952 unsigned int bytes = 0;
1953 struct bio *bio;
1954
1955 if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1956 return blk_rq_bytes(rq);
1957
1958 /*
1959 * Currently the only 'mixing' which can happen is between
1960 * different fastfail types. We can safely fail portions
1961 * which have all the failfast bits that the first one has -
1962 * the ones which are at least as eager to fail as the first
1963 * one.
1964 */
1965 for (bio = rq->bio; bio; bio = bio->bi_next) {
1966 if ((bio->bi_rw & ff) != ff)
1967 break;
1968 bytes += bio->bi_size;
1969 }
1970
1971 /* this could lead to infinite loop */
1972 BUG_ON(blk_rq_bytes(rq) && !bytes);
1973 return bytes;
1974}
1975EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1976
Jens Axboebc58ba92009-01-23 10:54:44 +01001977static void blk_account_io_completion(struct request *req, unsigned int bytes)
1978{
Jens Axboec2553b52009-04-24 08:10:11 +02001979 if (blk_do_io_stat(req)) {
Jens Axboebc58ba92009-01-23 10:54:44 +01001980 const int rw = rq_data_dir(req);
1981 struct hd_struct *part;
1982 int cpu;
1983
1984 cpu = part_stat_lock();
Jerome Marchand09e099d2011-01-05 16:57:38 +01001985 part = req->part;
Jens Axboebc58ba92009-01-23 10:54:44 +01001986 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1987 part_stat_unlock();
1988 }
1989}
1990
1991static void blk_account_io_done(struct request *req)
1992{
Jens Axboebc58ba92009-01-23 10:54:44 +01001993 /*
Tejun Heodd4c1332010-09-03 11:56:16 +02001994 * Account IO completion. flush_rq isn't accounted as a
1995 * normal IO on queueing nor completion. Accounting the
1996 * containing request is enough.
Jens Axboebc58ba92009-01-23 10:54:44 +01001997 */
Tejun Heo414b4ff2011-01-25 12:43:49 +01001998 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
Jens Axboebc58ba92009-01-23 10:54:44 +01001999 unsigned long duration = jiffies - req->start_time;
2000 const int rw = rq_data_dir(req);
2001 struct hd_struct *part;
2002 int cpu;
2003
2004 cpu = part_stat_lock();
Jerome Marchand09e099d2011-01-05 16:57:38 +01002005 part = req->part;
Jens Axboebc58ba92009-01-23 10:54:44 +01002006
2007 part_stat_inc(cpu, part, ios[rw]);
2008 part_stat_add(cpu, part, ticks[rw], duration);
2009 part_round_stats(cpu, part);
Nikanth Karthikesan316d3152009-10-06 20:16:55 +02002010 part_dec_in_flight(part, rw);
Jens Axboebc58ba92009-01-23 10:54:44 +01002011
Jens Axboe6c23a962011-01-07 08:43:37 +01002012 hd_struct_put(part);
Jens Axboebc58ba92009-01-23 10:54:44 +01002013 part_stat_unlock();
2014 }
2015}
2016
Tejun Heo53a08802008-12-03 12:41:26 +01002017/**
Tejun Heo9934c8c2009-05-08 11:54:16 +09002018 * blk_peek_request - peek at the top of a request queue
2019 * @q: request queue to peek at
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05002020 *
2021 * Description:
Tejun Heo9934c8c2009-05-08 11:54:16 +09002022 * Return the request at the top of @q. The returned request
2023 * should be started using blk_start_request() before LLD starts
2024 * processing it.
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05002025 *
2026 * Return:
Tejun Heo9934c8c2009-05-08 11:54:16 +09002027 * Pointer to the request at the top of @q if available. Null
2028 * otherwise.
2029 *
2030 * Context:
2031 * queue_lock must be held.
2032 */
2033struct request *blk_peek_request(struct request_queue *q)
Tejun Heo158dbda2009-04-23 11:05:18 +09002034{
2035 struct request *rq;
2036 int ret;
2037
2038 while ((rq = __elv_next_request(q)) != NULL) {
2039 if (!(rq->cmd_flags & REQ_STARTED)) {
2040 /*
2041 * This is the first time the device driver
2042 * sees this request (possibly after
2043 * requeueing). Notify IO scheduler.
2044 */
Christoph Hellwig33659eb2010-08-07 18:17:56 +02002045 if (rq->cmd_flags & REQ_SORTED)
Tejun Heo158dbda2009-04-23 11:05:18 +09002046 elv_activate_rq(q, rq);
2047
2048 /*
2049 * just mark it as started even if we don't start
2050 * it; a request that has been delayed should
2051 * not be passed by new incoming requests
2052 */
2053 rq->cmd_flags |= REQ_STARTED;
2054 trace_block_rq_issue(q, rq);
2055 }
2056
2057 if (!q->boundary_rq || q->boundary_rq == rq) {
2058 q->end_sector = rq_end_sector(rq);
2059 q->boundary_rq = NULL;
2060 }
2061
2062 if (rq->cmd_flags & REQ_DONTPREP)
2063 break;
2064
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002065 if (q->dma_drain_size && blk_rq_bytes(rq)) {
Tejun Heo158dbda2009-04-23 11:05:18 +09002066 /*
2067 * make sure space for the drain appears. We
2068 * know we can do this because max_hw_segments
2069 * has been adjusted to be one fewer than the
2070 * device can handle
2071 */
2072 rq->nr_phys_segments++;
2073 }
2074
2075 if (!q->prep_rq_fn)
2076 break;
2077
2078 ret = q->prep_rq_fn(q, rq);
2079 if (ret == BLKPREP_OK) {
2080 break;
2081 } else if (ret == BLKPREP_DEFER) {
2082 /*
2083 * the request may have been (partially) prepped.
2084 * we need to keep this request in the front to
2085 * avoid resource deadlock. REQ_STARTED will
2086 * prevent other fs requests from passing this one.
2087 */
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002088 if (q->dma_drain_size && blk_rq_bytes(rq) &&
Tejun Heo158dbda2009-04-23 11:05:18 +09002089 !(rq->cmd_flags & REQ_DONTPREP)) {
2090 /*
2091 * remove the space for the drain we added
2092 * so that we don't add it again
2093 */
2094 --rq->nr_phys_segments;
2095 }
2096
2097 rq = NULL;
2098 break;
2099 } else if (ret == BLKPREP_KILL) {
2100 rq->cmd_flags |= REQ_QUIET;
James Bottomleyc143dc92009-05-30 06:43:49 +02002101 /*
2102 * Mark this request as started so we don't trigger
2103 * any debug logic in the end I/O path.
2104 */
2105 blk_start_request(rq);
Tejun Heo40cbbb72009-04-23 11:05:19 +09002106 __blk_end_request_all(rq, -EIO);
Tejun Heo158dbda2009-04-23 11:05:18 +09002107 } else {
2108 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2109 break;
2110 }
2111 }
2112
2113 return rq;
2114}
Tejun Heo9934c8c2009-05-08 11:54:16 +09002115EXPORT_SYMBOL(blk_peek_request);
Tejun Heo158dbda2009-04-23 11:05:18 +09002116
Tejun Heo9934c8c2009-05-08 11:54:16 +09002117void blk_dequeue_request(struct request *rq)
Tejun Heo158dbda2009-04-23 11:05:18 +09002118{
Tejun Heo9934c8c2009-05-08 11:54:16 +09002119 struct request_queue *q = rq->q;
2120
Tejun Heo158dbda2009-04-23 11:05:18 +09002121 BUG_ON(list_empty(&rq->queuelist));
2122 BUG_ON(ELV_ON_HASH(rq));
2123
2124 list_del_init(&rq->queuelist);
2125
2126 /*
2127 * the time frame between a request being removed from the lists
2128 * and when it is freed is accounted as IO that is in progress on
2129 * the driver side.
2130 */
Divyesh Shah91952912010-04-01 15:01:41 -07002131 if (blk_account_rq(rq)) {
Jens Axboe0a7ae2f2009-05-20 08:54:31 +02002132 q->in_flight[rq_is_sync(rq)]++;
Divyesh Shah91952912010-04-01 15:01:41 -07002133 set_io_start_time_ns(rq);
2134 }
Tejun Heo158dbda2009-04-23 11:05:18 +09002135}
2136
Tejun Heo5efccd12009-04-23 11:05:18 +09002137/**
Tejun Heo9934c8c2009-05-08 11:54:16 +09002138 * blk_start_request - start request processing on the driver
2139 * @req: request to dequeue
2140 *
2141 * Description:
2142 * Dequeue @req and start timeout timer on it. This hands off the
2143 * request to the driver.
2144 *
2145 * Block internal functions which don't want to start timer should
2146 * call blk_dequeue_request().
2147 *
2148 * Context:
2149 * queue_lock must be held.
2150 */
2151void blk_start_request(struct request *req)
2152{
2153 blk_dequeue_request(req);
2154
2155 /*
Tejun Heo5f49f632009-05-19 18:33:05 +09002156 * We are now handing the request to the hardware, initialize
2157 * resid_len to full count and add the timeout handler.
Tejun Heo9934c8c2009-05-08 11:54:16 +09002158 */
Tejun Heo5f49f632009-05-19 18:33:05 +09002159 req->resid_len = blk_rq_bytes(req);
FUJITA Tomonoridbb66c42009-06-09 05:47:10 +02002160 if (unlikely(blk_bidi_rq(req)))
2161 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2162
Tejun Heo9934c8c2009-05-08 11:54:16 +09002163 blk_add_timer(req);
2164}
2165EXPORT_SYMBOL(blk_start_request);
2166
2167/**
2168 * blk_fetch_request - fetch a request from a request queue
2169 * @q: request queue to fetch a request from
2170 *
2171 * Description:
2172 * Return the request at the top of @q. The request is started on
2173 * return and LLD can start processing it immediately.
2174 *
2175 * Return:
2176 * Pointer to the request at the top of @q if available. Null
2177 * otherwise.
2178 *
2179 * Context:
2180 * queue_lock must be held.
2181 */
2182struct request *blk_fetch_request(struct request_queue *q)
2183{
2184 struct request *rq;
2185
2186 rq = blk_peek_request(q);
2187 if (rq)
2188 blk_start_request(rq);
2189 return rq;
2190}
2191EXPORT_SYMBOL(blk_fetch_request);
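/*
 * Illustrative sketch (not part of the original file): the canonical shape
 * of a simple, non-tagged driver's request_fn built on blk_fetch_request(),
 * completing each request in one go.  example_hw_xfer() is hypothetical;
 * request_fn runs with q->queue_lock held, so the __blk_end_request_all()
 * variant is the appropriate completion call here.
 */
#if 0
static int example_hw_xfer(struct request *rq);

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		int err = -EIO;

		if (rq->cmd_type == REQ_TYPE_FS)
			err = example_hw_xfer(rq);

		/* complete all bytes of the request under queue_lock */
		__blk_end_request_all(rq, err);
	}
}
#endif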
2192
2193/**
Tejun Heo2e60e022009-04-23 11:05:18 +09002194 * blk_update_request - Special helper function for request stacking drivers
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002195 * @req: the request being processed
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05002196 * @error: %0 for success, < %0 for error
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002197 * @nr_bytes: number of bytes to complete @req
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05002198 *
2199 * Description:
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002200 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2201 * the request structure even if @req doesn't have leftover.
2202 * If @req has leftover, sets it up for the next range of segments.
Tejun Heo2e60e022009-04-23 11:05:18 +09002203 *
2204 * This special helper function is only for request stacking drivers
2205 * (e.g. request-based dm) so that they can handle partial completion.
2206 * Actual device drivers should use blk_end_request instead.
2207 *
2208 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2209 * %false return from this function.
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05002210 *
2211 * Return:
Tejun Heo2e60e022009-04-23 11:05:18 +09002212 * %false - this request doesn't have any more data
2213 * %true - this request has more data
Kiyoshi Ueda3bcddea2007-12-11 17:52:28 -05002214 **/
Tejun Heo2e60e022009-04-23 11:05:18 +09002215bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216{
Kiyoshi Ueda5450d3e2007-12-11 17:53:03 -05002217 int total_bytes, bio_nbytes, next_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 struct bio *bio;
2219
Tejun Heo2e60e022009-04-23 11:05:18 +09002220 if (!req->bio)
2221 return false;
2222
Arnaldo Carvalho de Melo5f3ea372008-10-30 08:34:33 +01002223 trace_block_rq_complete(req->q, req);
Jens Axboe2056a782006-03-23 20:00:26 +01002224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 /*
Tejun Heo6f414692009-04-19 07:00:41 +09002226 * For fs requests, rq is just a carrier of independent bios,
2227 * and each partial completion should be handled separately.
2228 * Reset per-request error on each partial completion.
2229 *
2230 * TODO: tj: This is too subtle. It would be better to let
2231 * low level drivers do what they see fit.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 */
Christoph Hellwig33659eb2010-08-07 18:17:56 +02002233 if (req->cmd_type == REQ_TYPE_FS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 req->errors = 0;
2235
Christoph Hellwig33659eb2010-08-07 18:17:56 +02002236 if (error && req->cmd_type == REQ_TYPE_FS &&
2237 !(req->cmd_flags & REQ_QUIET)) {
Hannes Reinecke79775562011-01-18 10:13:13 +01002238 char *error_type;
2239
2240 switch (error) {
2241 case -ENOLINK:
2242 error_type = "recoverable transport";
2243 break;
2244 case -EREMOTEIO:
2245 error_type = "critical target";
2246 break;
2247 case -EBADE:
2248 error_type = "critical nexus";
2249 break;
2250 case -EIO:
2251 default:
2252 error_type = "I/O";
2253 break;
2254 }
Yi Zou37d7b342012-08-30 16:26:25 -07002255 printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2256 error_type, req->rq_disk ?
2257 req->rq_disk->disk_name : "?",
2258 (unsigned long long)blk_rq_pos(req));
2259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 }
2261
Jens Axboebc58ba92009-01-23 10:54:44 +01002262 blk_account_io_completion(req, nr_bytes);
Jens Axboed72d9042005-11-01 08:35:42 +01002263
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 total_bytes = bio_nbytes = 0;
2265 while ((bio = req->bio) != NULL) {
2266 int nbytes;
2267
2268 if (nr_bytes >= bio->bi_size) {
2269 req->bio = bio->bi_next;
2270 nbytes = bio->bi_size;
NeilBrown5bb23a62007-09-27 12:46:13 +02002271 req_bio_endio(req, bio, nbytes, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 next_idx = 0;
2273 bio_nbytes = 0;
2274 } else {
2275 int idx = bio->bi_idx + next_idx;
2276
Kazuhisa Ichikawaaf498d72009-05-12 13:27:45 +02002277 if (unlikely(idx >= bio->bi_vcnt)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 blk_dump_rq_flags(req, "__end_that");
Jens Axboe6728cb02008-01-31 13:03:55 +01002279 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
Kazuhisa Ichikawaaf498d72009-05-12 13:27:45 +02002280 __func__, idx, bio->bi_vcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 break;
2282 }
2283
2284 nbytes = bio_iovec_idx(bio, idx)->bv_len;
2285 BIO_BUG_ON(nbytes > bio->bi_size);
2286
2287 /*
2288 * not a complete bvec done
2289 */
2290 if (unlikely(nbytes > nr_bytes)) {
2291 bio_nbytes += nr_bytes;
2292 total_bytes += nr_bytes;
2293 break;
2294 }
2295
2296 /*
2297 * advance to the next vector
2298 */
2299 next_idx++;
2300 bio_nbytes += nbytes;
2301 }
2302
2303 total_bytes += nbytes;
2304 nr_bytes -= nbytes;
2305
Jens Axboe6728cb02008-01-31 13:03:55 +01002306 bio = req->bio;
2307 if (bio) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 /*
2309 * end more in this run, or just return 'not-done'
2310 */
2311 if (unlikely(nr_bytes <= 0))
2312 break;
2313 }
2314 }
2315
2316 /*
2317 * completely done
2318 */
Tejun Heo2e60e022009-04-23 11:05:18 +09002319 if (!req->bio) {
2320 /*
2321 * Reset counters so that the request stacking driver
2322 * can find how many bytes remain in the request
2323 * later.
2324 */
Tejun Heoa2dec7b2009-05-07 22:24:44 +09002325 req->__data_len = 0;
Tejun Heo2e60e022009-04-23 11:05:18 +09002326 return false;
2327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
2329 /*
2330 * if the request wasn't completed, update state
2331 */
2332 if (bio_nbytes) {
NeilBrown5bb23a62007-09-27 12:46:13 +02002333 req_bio_endio(req, bio, bio_nbytes, error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 bio->bi_idx += next_idx;
2335 bio_iovec(bio)->bv_offset += nr_bytes;
2336 bio_iovec(bio)->bv_len -= nr_bytes;
2337 }
2338
Tejun Heoa2dec7b2009-05-07 22:24:44 +09002339 req->__data_len -= total_bytes;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002340 req->buffer = bio_data(req->bio);
2341
2342 /* update sector only for requests with clear definition of sector */
Christoph Hellwig33659eb2010-08-07 18:17:56 +02002343 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
Tejun Heoa2dec7b2009-05-07 22:24:44 +09002344 req->__sector += total_bytes >> 9;
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002345
Tejun Heo80a761f2009-07-03 17:48:17 +09002346 /* mixed attributes always follow the first bio */
2347 if (req->cmd_flags & REQ_MIXED_MERGE) {
2348 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2349 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2350 }
2351
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002352 /*
2353 * If total number of sectors is less than the first segment
2354 * size, something has gone terribly wrong.
2355 */
2356 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
Jens Axboe81829242011-03-30 09:51:33 +02002357 blk_dump_rq_flags(req, "request botched");
Tejun Heoa2dec7b2009-05-07 22:24:44 +09002358 req->__data_len = blk_rq_cur_bytes(req);
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002359 }
2360
2361 /* recalculate the number of segments */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 blk_recalc_rq_segments(req);
Tejun Heo2e46e8b2009-05-07 22:24:41 +09002363
Tejun Heo2e60e022009-04-23 11:05:18 +09002364 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365}
Tejun Heo2e60e022009-04-23 11:05:18 +09002366EXPORT_SYMBOL_GPL(blk_update_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367
Tejun Heo2e60e022009-04-23 11:05:18 +09002368static bool blk_update_bidi_request(struct request *rq, int error,
2369 unsigned int nr_bytes,
2370 unsigned int bidi_bytes)
Tejun Heo5efccd12009-04-23 11:05:18 +09002371{
Tejun Heo2e60e022009-04-23 11:05:18 +09002372 if (blk_update_request(rq, error, nr_bytes))
2373 return true;
Tejun Heo5efccd12009-04-23 11:05:18 +09002374
Tejun Heo2e60e022009-04-23 11:05:18 +09002375 /* Bidi request must be completed as a whole */
2376 if (unlikely(blk_bidi_rq(rq)) &&
2377 blk_update_request(rq->next_rq, error, bidi_bytes))
2378 return true;
Tejun Heo5efccd12009-04-23 11:05:18 +09002379
Jens Axboee2e1a142010-06-09 10:42:09 +02002380 if (blk_queue_add_random(rq->q))
2381 add_disk_randomness(rq->rq_disk);
Tejun Heo2e60e022009-04-23 11:05:18 +09002382
2383 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384}
2385
James Bottomley28018c22010-07-01 19:49:17 +09002386/**
2387 * blk_unprep_request - unprepare a request
2388 * @req: the request
2389 *
2390 * This function makes a request ready for complete resubmission (or
2391 * completion). It happens only after all error handling is complete,
2392 * so represents the appropriate moment to deallocate any resources
2393 * that were allocated to the request in the prep_rq_fn. The queue
2394 * lock is held when calling this.
2395 */
2396void blk_unprep_request(struct request *req)
2397{
2398 struct request_queue *q = req->q;
2399
2400 req->cmd_flags &= ~REQ_DONTPREP;
2401 if (q->unprep_rq_fn)
2402 q->unprep_rq_fn(q, req);
2403}
2404EXPORT_SYMBOL_GPL(blk_unprep_request);
2405
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406/*
2407 * queue lock must be held
2408 */
Tejun Heo2e60e022009-04-23 11:05:18 +09002409static void blk_finish_request(struct request *req, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410{
Kiyoshi Uedab8286232007-12-11 17:53:24 -05002411 if (blk_rq_tagged(req))
2412 blk_queue_end_tag(req->q, req);
2413
James Bottomleyba396a62009-05-27 14:17:08 +02002414 BUG_ON(blk_queued_rq(req));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415
Christoph Hellwig33659eb2010-08-07 18:17:56 +02002416 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
Matthew Garrett31373d02010-04-06 14:25:14 +02002417 laptop_io_completion(&req->q->backing_dev_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418
Mike Andersone78042e2008-10-30 02:16:20 -07002419 blk_delete_timer(req);
2420
James Bottomley28018c22010-07-01 19:49:17 +09002421 if (req->cmd_flags & REQ_DONTPREP)
2422 blk_unprep_request(req);
2423
2424
Jens Axboebc58ba92009-01-23 10:54:44 +01002425 blk_account_io_done(req);
Kiyoshi Uedab8286232007-12-11 17:53:24 -05002426
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 if (req->end_io)
Tejun Heo8ffdc652006-01-06 09:49:03 +01002428 req->end_io(req, error);
Kiyoshi Uedab8286232007-12-11 17:53:24 -05002429 else {
2430 if (blk_bidi_rq(req))
2431 __blk_put_request(req->next_rq->q, req->next_rq);
2432
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 __blk_put_request(req->q, req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 }
2435}
2436
Kiyoshi Ueda3b113132007-12-11 17:41:17 -05002437/**
Tejun Heo2e60e022009-04-23 11:05:18 +09002438 * blk_end_bidi_request - Complete a bidi request
2439 * @rq: the request to complete
Randy Dunlap710027a2008-08-19 20:13:11 +02002440 * @error: %0 for success, < %0 for error
Kiyoshi Uedae3a04fe2007-12-11 17:51:46 -05002441 * @nr_bytes: number of bytes to complete @rq
2442 * @bidi_bytes: number of bytes to complete @rq->next_rq
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002443 *
2444 * Description:
2445 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
Tejun Heo2e60e022009-04-23 11:05:18 +09002446 * Drivers that support bidi can safely call this function for any
2447 * type of request, bidi or uni. In the latter case @bidi_bytes is
2448 * just ignored.
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002449 *
2450 * Return:
Tejun Heo2e60e022009-04-23 11:05:18 +09002451 * %false - we are done with this request
2452 * %true - still buffers pending for this request
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002453 **/
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002454static bool blk_end_bidi_request(struct request *rq, int error,
2455 unsigned int nr_bytes, unsigned int bidi_bytes)
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002456{
2457 struct request_queue *q = rq->q;
Tejun Heo2e60e022009-04-23 11:05:18 +09002458 unsigned long flags;
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002459
Tejun Heo2e60e022009-04-23 11:05:18 +09002460 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2461 return true;
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002462
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002463 spin_lock_irqsave(q->queue_lock, flags);
Tejun Heo2e60e022009-04-23 11:05:18 +09002464 blk_finish_request(rq, error);
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002465 spin_unlock_irqrestore(q->queue_lock, flags);
2466
Tejun Heo2e60e022009-04-23 11:05:18 +09002467 return false;
Kiyoshi Uedae3a04fe2007-12-11 17:51:46 -05002468}
Kiyoshi Uedae3a04fe2007-12-11 17:51:46 -05002469
2470/**
Tejun Heo2e60e022009-04-23 11:05:18 +09002471 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2472 * @rq: the request to complete
2473 * @error: %0 for success, < %0 for error
2474 * @nr_bytes: number of bytes to complete @rq
2475 * @bidi_bytes: number of bytes to complete @rq->next_rq
Tejun Heo5efccd12009-04-23 11:05:18 +09002476 *
2477 * Description:
Tejun Heo2e60e022009-04-23 11:05:18 +09002478 * Identical to blk_end_bidi_request() except that queue lock is
2479 * assumed to be locked on entry and remains so on return.
Tejun Heo5efccd12009-04-23 11:05:18 +09002480 *
Tejun Heo2e60e022009-04-23 11:05:18 +09002481 * Return:
2482 * %false - we are done with this request
2483 * %true - still buffers pending for this request
Tejun Heo5efccd12009-04-23 11:05:18 +09002484 **/
Jeff Moyer4853aba2011-08-15 21:37:25 +02002485bool __blk_end_bidi_request(struct request *rq, int error,
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002486 unsigned int nr_bytes, unsigned int bidi_bytes)
Tejun Heo5efccd12009-04-23 11:05:18 +09002487{
Tejun Heo2e60e022009-04-23 11:05:18 +09002488 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2489 return true;
Tejun Heo5efccd12009-04-23 11:05:18 +09002490
Tejun Heo2e60e022009-04-23 11:05:18 +09002491 blk_finish_request(rq, error);
Tejun Heo5efccd12009-04-23 11:05:18 +09002492
Tejun Heo2e60e022009-04-23 11:05:18 +09002493 return false;
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002494}
2495
2496/**
2497 * blk_end_request - Helper function for drivers to complete the request.
2498 * @rq: the request being processed
2499 * @error: %0 for success, < %0 for error
2500 * @nr_bytes: number of bytes to complete
2501 *
2502 * Description:
2503 * Ends I/O on a number of bytes attached to @rq.
2504 * If @rq has leftover, sets it up for the next range of segments.
2505 *
2506 * Return:
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002507 * %false - we are done with this request
2508 * %true - still buffers pending for this request
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002509 **/
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002510bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002511{
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002512 return blk_end_bidi_request(rq, error, nr_bytes, 0);
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002513}
Jens Axboe56ad1742009-07-28 22:11:24 +02002514EXPORT_SYMBOL(blk_end_request);
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002515
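/*
 * Illustrative sketch (not part of blk-core.c): completing a request
 * from a driver's completion handler with blk_end_request().  The
 * caller must not hold the queue lock; "mydrv_complete" and
 * "bytes_done" are hypothetical.
 */
static void mydrv_complete(struct request *rq, int error,
                           unsigned int bytes_done)
{
        if (blk_end_request(rq, error, bytes_done)) {
                /*
                 * Partial completion: @rq has been set up for the next
                 * range of segments and the driver will be asked to
                 * transfer the remainder.
                 */
                return;
        }
        /* Returned false: the whole request is finished and released. */
}
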
2516/**
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002517 * blk_end_request_all - Helper function for drivers to finish the request.
2518 * @rq: the request to finish
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002519 * @error: %0 for success, < %0 for error
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002520 *
2521 * Description:
2522 * Completely finish @rq.
2523 */
2524void blk_end_request_all(struct request *rq, int error)
2525{
2526 bool pending;
2527 unsigned int bidi_bytes = 0;
2528
2529 if (unlikely(blk_bidi_rq(rq)))
2530 bidi_bytes = blk_rq_bytes(rq->next_rq);
2531
2532 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2533 BUG_ON(pending);
2534}
Jens Axboe56ad1742009-07-28 22:11:24 +02002535EXPORT_SYMBOL(blk_end_request_all);
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002536
2537/**
2538 * blk_end_request_cur - Helper function to finish the current request chunk.
2539 * @rq: the request to finish the current chunk for
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002540 * @error: %0 for success, < %0 for error
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002541 *
2542 * Description:
2543 * Complete the current consecutively mapped chunk from @rq.
2544 *
2545 * Return:
2546 * %false - we are done with this request
2547 * %true - still buffers pending for this request
2548 */
2549bool blk_end_request_cur(struct request *rq, int error)
2550{
2551 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2552}
Jens Axboe56ad1742009-07-28 22:11:24 +02002553EXPORT_SYMBOL(blk_end_request_cur);
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002554
2555/**
Tejun Heo80a761f2009-07-03 17:48:17 +09002556 * blk_end_request_err - Finish a request till the next failure boundary.
2557 * @rq: the request to finish till the next failure boundary for
2558 * @error: must be negative errno
2559 *
2560 * Description:
2561 * Complete @rq till the next failure boundary.
2562 *
2563 * Return:
2564 * %false - we are done with this request
2565 * %true - still buffers pending for this request
2566 */
2567bool blk_end_request_err(struct request *rq, int error)
2568{
2569 WARN_ON(error >= 0);
2570 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2571}
2572EXPORT_SYMBOL_GPL(blk_end_request_err);
2573
2574/**
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002575 * __blk_end_request - Helper function for drivers to complete the request.
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002576 * @rq: the request being processed
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002577 * @error: %0 for success, < %0 for error
2578 * @nr_bytes: number of bytes to complete
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002579 *
2580 * Description:
2581 * Must be called with queue lock held, unlike blk_end_request().
2582 *
2583 * Return:
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002584 * %false - we are done with this request
2585 * %true - still buffers pending for this request
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002586 **/
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002587bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002588{
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002589 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002590}
Jens Axboe56ad1742009-07-28 22:11:24 +02002591EXPORT_SYMBOL(__blk_end_request);
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002592
2593/**
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002594 * __blk_end_request_all - Helper function for drivers to finish the request.
2595 * @rq: the request to finish
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002596 * @error: %0 for success, < %0 for error
Kiyoshi Ueda336cdb42007-12-11 17:40:30 -05002597 *
2598 * Description:
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002599 * Completely finish @rq. Must be called with queue lock held.
Kiyoshi Ueda32fab442008-09-18 10:45:09 -04002600 */
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002601void __blk_end_request_all(struct request *rq, int error)
Kiyoshi Ueda32fab442008-09-18 10:45:09 -04002602{
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002603 bool pending;
2604 unsigned int bidi_bytes = 0;
2605
2606 if (unlikely(blk_bidi_rq(rq)))
2607 bidi_bytes = blk_rq_bytes(rq->next_rq);
2608
2609 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2610 BUG_ON(pending);
Kiyoshi Ueda32fab442008-09-18 10:45:09 -04002611}
Jens Axboe56ad1742009-07-28 22:11:24 +02002612EXPORT_SYMBOL(__blk_end_request_all);
Kiyoshi Ueda32fab442008-09-18 10:45:09 -04002613
2614/**
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002615 * __blk_end_request_cur - Helper function to finish the current request chunk.
2616 * @rq: the request to finish the current chunk for
Randy Dunlap8ebf9752009-06-11 20:00:41 -07002617 * @error: %0 for success, < %0 for error
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002618 *
2619 * Description:
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002620 * Complete the current consecutively mapped chunk from @rq. Must
2621 * be called with queue lock held.
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002622 *
2623 * Return:
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002624 * %false - we are done with this request
2625 * %true - still buffers pending for this request
2626 */
2627bool __blk_end_request_cur(struct request *rq, int error)
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002628{
FUJITA Tomonorib1f74492009-05-11 17:56:09 +09002629 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002630}
Jens Axboe56ad1742009-07-28 22:11:24 +02002631EXPORT_SYMBOL(__blk_end_request_cur);
Kiyoshi Uedae19a3ab2007-12-11 17:51:02 -05002632
Tejun Heo80a761f2009-07-03 17:48:17 +09002633/**
2634 * __blk_end_request_err - Finish a request till the next failure boundary.
2635 * @rq: the request to finish till the next failure boundary for
2636 * @error: must be negative errno
2637 *
2638 * Description:
2639 * Complete @rq till the next failure boundary. Must be called
2640 * with queue lock held.
2641 *
2642 * Return:
2643 * %false - we are done with this request
2644 * %true - still buffers pending for this request
2645 */
2646bool __blk_end_request_err(struct request *rq, int error)
2647{
2648 WARN_ON(error >= 0);
2649 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2650}
2651EXPORT_SYMBOL_GPL(__blk_end_request_err);
2652
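/*
 * Illustrative sketch (not part of blk-core.c): a minimal request_fn
 * style driver using the __blk_end_request_*() helpers, which expect
 * the queue lock to be held (the request_fn is invoked with it held).
 * The "myramdisk_" names are hypothetical.
 */
static int myramdisk_xfer(struct request *rq)
{
        /* copy data to/from the backing store here; 0 on success */
        return 0;
}

static void myramdisk_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (rq->cmd_type != REQ_TYPE_FS) {
                        /* not a filesystem request: fail it outright */
                        __blk_end_request_all(rq, -EIO);
                        continue;
                }
                /* complete the whole request in one go */
                __blk_end_request_all(rq, myramdisk_xfer(rq));
        }
}
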
Jens Axboe86db1e22008-01-29 14:53:40 +01002653void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2654 struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655{
Tejun Heoa82afdf2009-07-03 17:48:16 +09002656 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02002657 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658
David Woodhousefb2dce82008-08-05 18:01:53 +01002659 if (bio_has_data(bio)) {
2660 rq->nr_phys_segments = bio_phys_segments(q, bio);
David Woodhousefb2dce82008-08-05 18:01:53 +01002661 rq->buffer = bio_data(bio);
2662 }
Tejun Heoa2dec7b2009-05-07 22:24:44 +09002663 rq->__data_len = bio->bi_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 rq->bio = rq->biotail = bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665
NeilBrown66846572007-08-16 13:31:28 +02002666 if (bio->bi_bdev)
2667 rq->rq_disk = bio->bi_bdev->bd_disk;
2668}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
Ilya Loginov2d4dc892009-11-26 09:16:19 +01002670#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2671/**
2672 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2673 * @rq: the request to be flushed
2674 *
2675 * Description:
2676 * Flush all pages in @rq.
2677 */
2678void rq_flush_dcache_pages(struct request *rq)
2679{
2680 struct req_iterator iter;
2681 struct bio_vec *bvec;
2682
2683 rq_for_each_segment(bvec, rq, iter)
2684 flush_dcache_page(bvec->bv_page);
2685}
2686EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2687#endif
2688
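/*
 * Illustrative sketch (not part of blk-core.c): a PIO driver that fills
 * a read request's pages by CPU copy and flushes the D-cache before
 * completing, so user mappings see the new data on architectures with
 * aliasing caches.  On other architectures rq_flush_dcache_pages() is a
 * no-op, so the call is unconditional.  "mydrv_pio_read" is hypothetical.
 */
static void mydrv_pio_read(struct request *rq)
{
        /* ... CPU-copy the data into the pages backing @rq ... */
        rq_flush_dcache_pages(rq);
        __blk_end_request_all(rq, 0);   /* queue lock held by the caller */
}
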
Kiyoshi Uedaef9e3fa2008-10-01 16:12:15 +02002689/**
2690 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2691 * @q : the queue of the device being checked
2692 *
2693 * Description:
2694 * Check if underlying low-level drivers of a device are busy.
2695 * If the drivers want to export their busy state, they must set their own
2696 * exporting function using blk_queue_lld_busy() first.
2697 *
2698 * Basically, this function is used only by request stacking drivers
2699 * to stop dispatching requests to underlying devices when underlying
2700 * devices are busy. This behavior helps I/O merging on the queue
2701 * of the request stacking driver and prevents I/O throughput regressions
2702 * under bursty I/O load.
2703 *
2704 * Return:
2705 * 0 - Not busy (The request stacking driver should dispatch request)
2706 * 1 - Busy (The request stacking driver should stop dispatching request)
2707 */
2708int blk_lld_busy(struct request_queue *q)
2709{
2710 if (q->lld_busy_fn)
2711 return q->lld_busy_fn(q);
2712
2713 return 0;
2714}
2715EXPORT_SYMBOL_GPL(blk_lld_busy);
2716
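/*
 * Illustrative sketch (not part of blk-core.c): the two halves of the
 * lld_busy mechanism.  A low-level driver exports its busy state with
 * blk_queue_lld_busy(); a request stacking driver then polls it through
 * blk_lld_busy() before dispatching.  The "mylld_" and "mystack_" names
 * and struct mylld are hypothetical.
 */
struct mylld {
        unsigned int inflight;
        unsigned int queue_depth;
};

static int mylld_busy_fn(struct request_queue *q)
{
        struct mylld *lld = q->queuedata;

        return lld->inflight >= lld->queue_depth;       /* 1 means busy */
}

/*
 * Registered by the low-level driver at queue init time:
 *      blk_queue_lld_busy(q, mylld_busy_fn);
 */

static bool mystack_may_dispatch(struct request_queue *lower_q)
{
        return !blk_lld_busy(lower_q);
}
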
Kiyoshi Uedab0fd2712009-06-11 13:10:16 +02002717/**
2718 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2719 * @rq: the clone request to be cleaned up
2720 *
2721 * Description:
2722 * Free all bios in @rq for a cloned request.
2723 */
2724void blk_rq_unprep_clone(struct request *rq)
2725{
2726 struct bio *bio;
2727
2728 while ((bio = rq->bio) != NULL) {
2729 rq->bio = bio->bi_next;
2730
2731 bio_put(bio);
2732 }
2733}
2734EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2735
2736/*
2737 * Copy attributes of the original request to the clone request.
2738 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2739 */
2740static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2741{
2742 dst->cpu = src->cpu;
Tejun Heo3a2edd02010-09-03 11:56:18 +02002743 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
Kiyoshi Uedab0fd2712009-06-11 13:10:16 +02002744 dst->cmd_type = src->cmd_type;
2745 dst->__sector = blk_rq_pos(src);
2746 dst->__data_len = blk_rq_bytes(src);
2747 dst->nr_phys_segments = src->nr_phys_segments;
2748 dst->ioprio = src->ioprio;
2749 dst->extra_len = src->extra_len;
2750}
2751
2752/**
2753 * blk_rq_prep_clone - Helper function to setup clone request
2754 * @rq: the request to be setup
2755 * @rq_src: original request to be cloned
2756 * @bs: bio_set that bios for clone are allocated from
2757 * @gfp_mask: memory allocation mask for bio
2758 * @bio_ctr: setup function to be called for each clone bio.
2759 * Returns %0 for success, non %0 for failure.
2760 * @data: private data to be passed to @bio_ctr
2761 *
2762 * Description:
2763 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2764 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2765 * are not copied, and copying such parts is the caller's responsibility.
2766 * Also, pages which the original bios are pointing to are not copied
2767 * and the cloned bios just point same pages.
2768 * So cloned bios must be completed before original bios, which means
2769 * the caller must complete @rq before @rq_src.
2770 */
2771int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2772 struct bio_set *bs, gfp_t gfp_mask,
2773 int (*bio_ctr)(struct bio *, struct bio *, void *),
2774 void *data)
2775{
2776 struct bio *bio, *bio_src;
2777
2778 if (!bs)
2779 bs = fs_bio_set;
2780
2781 blk_rq_init(NULL, rq);
2782
2783 __rq_for_each_bio(bio_src, rq_src) {
2784 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2785 if (!bio)
2786 goto free_and_out;
2787
2788 __bio_clone(bio, bio_src);
2789
2790 if (bio_integrity(bio_src) &&
Martin K. Petersen7878cba2009-06-26 15:37:49 +02002791 bio_integrity_clone(bio, bio_src, gfp_mask, bs))
Kiyoshi Uedab0fd2712009-06-11 13:10:16 +02002792 goto free_and_out;
2793
2794 if (bio_ctr && bio_ctr(bio, bio_src, data))
2795 goto free_and_out;
2796
2797 if (rq->bio) {
2798 rq->biotail->bi_next = bio;
2799 rq->biotail = bio;
2800 } else
2801 rq->bio = rq->biotail = bio;
2802 }
2803
2804 __blk_rq_prep_clone(rq, rq_src);
2805
2806 return 0;
2807
2808free_and_out:
2809 if (bio)
2810 bio_free(bio, bs);
2811 blk_rq_unprep_clone(rq);
2812
2813 return -ENOMEM;
2814}
2815EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
2816
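/*
 * Illustrative sketch (not part of blk-core.c): how a request stacking
 * driver might set up and tear down a clone.  The clone shares the
 * original's bio pages, so it must complete before the original does,
 * and its bios are then dropped with blk_rq_unprep_clone().  The
 * "mystack_" names and the clone allocation are hypothetical.
 */
static int mystack_setup_clone(struct request *clone, struct request *orig)
{
        int ret;

        /* a NULL bio_set means the clone bios come from fs_bio_set */
        ret = blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC, NULL, NULL);
        if (ret)
                return ret;

        /*
         * A real stacking driver would set clone->end_io here and then
         * dispatch @clone to the underlying queue.
         */
        return 0;
}

static void mystack_teardown_clone(struct request *clone)
{
        /* run after @clone has completed, before completing the original */
        blk_rq_unprep_clone(clone);
}
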
Jens Axboe18887ad2008-07-28 13:08:45 +02002817int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818{
2819 return queue_work(kblockd_workqueue, work);
2820}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821EXPORT_SYMBOL(kblockd_schedule_work);
2822
Vivek Goyale43473b2010-09-15 17:06:35 -04002823int kblockd_schedule_delayed_work(struct request_queue *q,
2824 struct delayed_work *dwork, unsigned long delay)
2825{
2826 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2827}
2828EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2829
Jens Axboe73c10102011-03-08 13:19:51 +01002830#define PLUG_MAGIC 0x91827364
2831
Suresh Jayaraman75df7132011-09-21 10:00:16 +02002832/**
2833 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2834 * @plug: The &struct blk_plug that needs to be initialized
2835 *
2836 * Description:
2837 * Tracking blk_plug inside the task_struct will help with auto-flushing the
2838 * pending I/O should the task end up blocking between blk_start_plug() and
2839 * blk_finish_plug(). This is important from a performance perspective, but
2840 * also ensures that we don't deadlock. For instance, if the task is blocking
2841 * for a memory allocation, memory reclaim could end up wanting to free a
2842 * page belonging to that request that is currently residing in our private
2843 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2844 * this kind of deadlock.
2845 */
Jens Axboe73c10102011-03-08 13:19:51 +01002846void blk_start_plug(struct blk_plug *plug)
2847{
2848 struct task_struct *tsk = current;
2849
2850 plug->magic = PLUG_MAGIC;
2851 INIT_LIST_HEAD(&plug->list);
NeilBrown048c9372011-04-18 09:52:22 +02002852 INIT_LIST_HEAD(&plug->cb_list);
Jens Axboe73c10102011-03-08 13:19:51 +01002853 plug->should_sort = 0;
2854
2855 /*
2856 * If this is a nested plug, don't actually assign it. It will be
2857 * flushed on its own.
2858 */
2859 if (!tsk->plug) {
2860 /*
2861 * Store ordering should not be needed here, since a potential
2862 * preempt will imply a full memory barrier
2863 */
2864 tsk->plug = plug;
2865 }
2866}
2867EXPORT_SYMBOL(blk_start_plug);
2868
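/*
 * Illustrative sketch (not part of blk-core.c): typical on-stack use of
 * the plugging API by a submitter.  Bios issued between
 * blk_start_plug() and blk_finish_plug() are held in the per-task plug
 * and dispatched as one batch when the plug is flushed.
 * "submit_my_bios" and its arguments are hypothetical.
 */
static void submit_my_bios(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]->bi_rw, bios[i]);
        blk_finish_plug(&plug);         /* flushes the plugged requests */
}
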
2869static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2870{
2871 struct request *rqa = container_of(a, struct request, queuelist);
2872 struct request *rqb = container_of(b, struct request, queuelist);
2873
Konstantin Khlebnikovf83e8262011-04-04 00:15:02 +02002874 return !(rqa->q <= rqb->q);
Jens Axboe73c10102011-03-08 13:19:51 +01002875}
2876
Jens Axboe49cac012011-04-16 13:51:05 +02002877/*
2878 * If 'from_schedule' is true, then postpone the dispatch of requests
2879 * until a safe kblockd context. We do this to avoid large, accidental
2880 * additional stack usage in driver dispatch, in places where the original
2881 * plugger did not intend it.
2882 */
Jens Axboef6603782011-04-15 15:49:07 +02002883static void queue_unplugged(struct request_queue *q, unsigned int depth,
Jens Axboe49cac012011-04-16 13:51:05 +02002884 bool from_schedule)
Jens Axboe99e22592011-04-18 09:59:55 +02002885 __releases(q->queue_lock)
Jens Axboe94b5eb22011-04-12 10:12:19 +02002886{
Jens Axboe49cac012011-04-16 13:51:05 +02002887 trace_block_unplug(q, depth, !from_schedule);
Jens Axboe99e22592011-04-18 09:59:55 +02002888
2889 /*
Tejun Heo8ba61432011-12-14 00:33:37 +01002890 * Don't mess with dead queue.
2891 */
2892 if (unlikely(blk_queue_dead(q))) {
2893 spin_unlock(q->queue_lock);
2894 return;
2895 }
2896
2897 /*
Jens Axboe99e22592011-04-18 09:59:55 +02002898 * If we are punting this to kblockd, then we can safely drop
2899 * the queue_lock before waking kblockd (which needs to take
2900 * this lock).
2901 */
2902 if (from_schedule) {
2903 spin_unlock(q->queue_lock);
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02002904 blk_run_queue_async(q);
Jens Axboe99e22592011-04-18 09:59:55 +02002905 } else {
Christoph Hellwig24ecfbe2011-04-18 11:41:33 +02002906 __blk_run_queue(q);
Jens Axboe99e22592011-04-18 09:59:55 +02002907 spin_unlock(q->queue_lock);
2908 }
2909
Jens Axboe94b5eb22011-04-12 10:12:19 +02002910}
2911
NeilBrown74018dc2012-07-31 09:08:15 +02002912static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
NeilBrown048c9372011-04-18 09:52:22 +02002913{
2914 LIST_HEAD(callbacks);
2915
Shaohua Li2a7d5552012-07-31 09:08:15 +02002916 while (!list_empty(&plug->cb_list)) {
2917 list_splice_init(&plug->cb_list, &callbacks);
NeilBrown048c9372011-04-18 09:52:22 +02002918
Shaohua Li2a7d5552012-07-31 09:08:15 +02002919 while (!list_empty(&callbacks)) {
2920 struct blk_plug_cb *cb = list_first_entry(&callbacks,
NeilBrown048c9372011-04-18 09:52:22 +02002921 struct blk_plug_cb,
2922 list);
Shaohua Li2a7d5552012-07-31 09:08:15 +02002923 list_del(&cb->list);
NeilBrown74018dc2012-07-31 09:08:15 +02002924 cb->callback(cb, from_schedule);
Shaohua Li2a7d5552012-07-31 09:08:15 +02002925 }
NeilBrown048c9372011-04-18 09:52:22 +02002926 }
2927}
2928
NeilBrown9cbb1752012-07-31 09:08:14 +02002929struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
2930 int size)
2931{
2932 struct blk_plug *plug = current->plug;
2933 struct blk_plug_cb *cb;
2934
2935 if (!plug)
2936 return NULL;
2937
2938 list_for_each_entry(cb, &plug->cb_list, list)
2939 if (cb->callback == unplug && cb->data == data)
2940 return cb;
2941
2942 /* Not currently on the callback list */
2943 BUG_ON(size < sizeof(*cb));
2944 cb = kzalloc(size, GFP_ATOMIC);
2945 if (cb) {
2946 cb->data = data;
2947 cb->callback = unplug;
2948 list_add(&cb->list, &plug->cb_list);
2949 }
2950 return cb;
2951}
2952EXPORT_SYMBOL(blk_check_plugged);
2953
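/*
 * Illustrative sketch (not part of blk-core.c): how a stacked driver
 * (md-style) can hook into the current task's plug with
 * blk_check_plugged().  The callback runs when the plug is flushed;
 * since blk_check_plugged() kzalloc()s the structure, the embedded
 * bio_list starts out empty.  The "myraid_" names and struct
 * myraid_plug_cb are hypothetical.
 */
struct myraid_plug_cb {
        struct blk_plug_cb cb;          /* recovered via container_of() */
        struct bio_list pending;        /* bios gathered while plugged */
};

static void myraid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct myraid_plug_cb *mcb =
                container_of(cb, struct myraid_plug_cb, cb);
        struct bio *bio;

        /* a real driver may defer to a worker when from_schedule is true */
        while ((bio = bio_list_pop(&mcb->pending)) != NULL)
                generic_make_request(bio);
        kfree(mcb);
}

static bool myraid_plug_bio(void *conf, struct bio *bio)
{
        struct blk_plug_cb *cb;
        struct myraid_plug_cb *mcb;

        cb = blk_check_plugged(myraid_unplug, conf, sizeof(*mcb));
        if (!cb)
                return false;           /* no plug active, submit directly */

        mcb = container_of(cb, struct myraid_plug_cb, cb);
        bio_list_add(&mcb->pending, bio);
        return true;
}
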
Jens Axboe49cac012011-04-16 13:51:05 +02002954void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
Jens Axboe73c10102011-03-08 13:19:51 +01002955{
2956 struct request_queue *q;
2957 unsigned long flags;
2958 struct request *rq;
NeilBrown109b8122011-04-11 14:13:10 +02002959 LIST_HEAD(list);
Jens Axboe94b5eb22011-04-12 10:12:19 +02002960 unsigned int depth;
Jens Axboe73c10102011-03-08 13:19:51 +01002961
2962 BUG_ON(plug->magic != PLUG_MAGIC);
2963
NeilBrown74018dc2012-07-31 09:08:15 +02002964 flush_plug_callbacks(plug, from_schedule);
Jens Axboe73c10102011-03-08 13:19:51 +01002965 if (list_empty(&plug->list))
2966 return;
2967
NeilBrown109b8122011-04-11 14:13:10 +02002968 list_splice_init(&plug->list, &list);
2969
2970 if (plug->should_sort) {
2971 list_sort(NULL, &list, plug_rq_cmp);
2972 plug->should_sort = 0;
2973 }
Jens Axboe73c10102011-03-08 13:19:51 +01002974
2975 q = NULL;
Jens Axboe94b5eb22011-04-12 10:12:19 +02002976 depth = 0;
Jens Axboe18811272011-04-12 10:11:24 +02002977
2978 /*
2979 * Save and disable interrupts here, to avoid doing it for every
2980 * queue lock we have to take.
2981 */
Jens Axboe73c10102011-03-08 13:19:51 +01002982 local_irq_save(flags);
NeilBrown109b8122011-04-11 14:13:10 +02002983 while (!list_empty(&list)) {
2984 rq = list_entry_rq(list.next);
Jens Axboe73c10102011-03-08 13:19:51 +01002985 list_del_init(&rq->queuelist);
Jens Axboe73c10102011-03-08 13:19:51 +01002986 BUG_ON(!rq->q);
2987 if (rq->q != q) {
Jens Axboe99e22592011-04-18 09:59:55 +02002988 /*
2989 * This drops the queue lock
2990 */
2991 if (q)
Jens Axboe49cac012011-04-16 13:51:05 +02002992 queue_unplugged(q, depth, from_schedule);
Jens Axboe73c10102011-03-08 13:19:51 +01002993 q = rq->q;
Jens Axboe94b5eb22011-04-12 10:12:19 +02002994 depth = 0;
Jens Axboe73c10102011-03-08 13:19:51 +01002995 spin_lock(q->queue_lock);
2996 }
Tejun Heo8ba61432011-12-14 00:33:37 +01002997
2998 /*
2999 * Short-circuit if @q is dead
3000 */
3001 if (unlikely(blk_queue_dead(q))) {
3002 __blk_end_request_all(rq, -ENODEV);
3003 continue;
3004 }
3005
Jens Axboe73c10102011-03-08 13:19:51 +01003006 /*
3007 * rq is already accounted, so use raw insert
3008 */
Jens Axboe401a18e2011-03-25 16:57:52 +01003009 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
3010 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3011 else
3012 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
Jens Axboe94b5eb22011-04-12 10:12:19 +02003013
3014 depth++;
Jens Axboe73c10102011-03-08 13:19:51 +01003015 }
3016
Jens Axboe99e22592011-04-18 09:59:55 +02003017 /*
3018 * This drops the queue lock
3019 */
3020 if (q)
Jens Axboe49cac012011-04-16 13:51:05 +02003021 queue_unplugged(q, depth, from_schedule);
Jens Axboe73c10102011-03-08 13:19:51 +01003022
Jens Axboe73c10102011-03-08 13:19:51 +01003023 local_irq_restore(flags);
3024}
Jens Axboe73c10102011-03-08 13:19:51 +01003025
3026void blk_finish_plug(struct blk_plug *plug)
3027{
Jens Axboef6603782011-04-15 15:49:07 +02003028 blk_flush_plug_list(plug, false);
Christoph Hellwig88b996c2011-04-15 15:20:10 +02003029
3030 if (plug == current->plug)
3031 current->plug = NULL;
Jens Axboe73c10102011-03-08 13:19:51 +01003032}
3033EXPORT_SYMBOL(blk_finish_plug);
3034
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035int __init blk_dev_init(void)
3036{
Nikanth Karthikesan9eb55b02009-04-27 14:53:54 +02003037 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3038 sizeof(((struct request *)0)->cmd_flags));
3039
Tejun Heo89b90be2011-01-03 15:01:47 +01003040 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3041 kblockd_workqueue = alloc_workqueue("kblockd",
3042 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 if (!kblockd_workqueue)
3044 panic("Failed to create kblockd\n");
3045
3046 request_cachep = kmem_cache_create("blkdev_requests",
Paul Mundt20c2df82007-07-20 10:11:58 +09003047 sizeof(struct request), 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048
Jens Axboe8324aa92008-01-29 14:51:59 +01003049 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
Jens Axboe165125e2007-07-24 09:28:11 +02003050 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 return 0;
3053}