/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);

DEFINE_IDA(blk_queue_ida);

/*
 * For the allocated request tables
 */
static struct kmem_cache *request_cachep;

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;
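
/*
 * drive_stat_acct - account a request in the per-partition I/O statistics
 * @rq:     the request being accounted
 * @new_io: non-zero for a newly issued request, zero for a merge
 *
 * Merges are counted against the partition the request is already mapped
 * to; new I/O is mapped to the partition containing its start sector and
 * counted as in-flight there.  Does nothing if I/O accounting is disabled
 * for @rq.
 */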
static void drive_stat_acct(struct request *rq, int new_io)
{
        struct hd_struct *part;
        int rw = rq_data_dir(rq);
        int cpu;

        if (!blk_do_io_stat(rq))
                return;

        cpu = part_stat_lock();

        if (!new_io) {
                part = rq->part;
                part_stat_inc(cpu, part, merges[rw]);
        } else {
                part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
                if (!hd_struct_try_get(part)) {
                        /*
                         * The partition is already being removed,
                         * the request will be accounted on the disk only
                         *
                         * We take a reference on disk->part0 although that
                         * partition will never be deleted, so we can treat
                         * it as any other partition.
                         */
                        part = &rq->rq_disk->part0;
                        hd_struct_get(part);
                }
                part_round_stats(cpu, part);
                part_inc_in_flight(part, rw);
                rq->part = part;
        }

        part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
        int nr;

        nr = q->nr_requests - (q->nr_requests / 8) + 1;
        if (nr > q->nr_requests)
                nr = q->nr_requests;
        q->nr_congestion_on = nr;

        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
        if (nr < 1)
                nr = 1;
        q->nr_congestion_off = nr;
}
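
/*
 * Worked example (for illustration): with the default nr_requests of 128,
 * the queue is marked congested once 128 - 128/8 + 1 = 113 requests are
 * allocated, and the congested state is cleared again when the count
 * drops below 128 - 128/8 - 128/16 - 1 = 103.  The gap between the two
 * thresholds provides hysteresis.
 */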

/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
        struct backing_dev_info *ret = NULL;
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                ret = &q->backing_dev_info;
        return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
        memset(rq, 0, sizeof(*rq));

        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->cpu = -1;
        rq->q = q;
        rq->__sector = (sector_t) -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->cmd = rq->__cmd;
        rq->cmd_len = BLK_MAX_CDB;
        rq->tag = -1;
        rq->ref_count = 1;
        rq->start_time = jiffies;
        set_start_time_ns(rq);
        rq->part = NULL;
}
EXPORT_SYMBOL(blk_rq_init);
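
/*
 * req_bio_endio - complete @nbytes of @bio on behalf of @rq
 *
 * Propagates any error to the bio, advances its sector and shrinks its
 * size by the completed byte count and, unless the bio is part of a
 * flush sequence, finishes it via bio_endio() once fully completed.
 */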
static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, int error)
{
        if (error)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (unlikely(nbytes > bio->bi_size)) {
                printk(KERN_ERR "%s: want %u bytes done, %u left\n",
                       __func__, nbytes, bio->bi_size);
                nbytes = bio->bi_size;
        }

        if (unlikely(rq->cmd_flags & REQ_QUIET))
                set_bit(BIO_QUIET, &bio->bi_flags);

        bio->bi_size -= nbytes;
        bio->bi_sector += (nbytes >> 9);

        if (bio_integrity(bio))
                bio_integrity_advance(bio, nbytes);

        /* don't actually finish bio if it's part of flush sequence */
        if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
                bio_endio(bio, error);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
        int bit;

        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                rq->cmd_flags);

        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
               (unsigned long long)blk_rq_pos(rq),
               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
               rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                printk(KERN_INFO "  cdb: ");
                for (bit = 0; bit < BLK_MAX_CDB; bit++)
                        printk("%02x ", rq->cmd[bit]);
                printk("\n");
        }
}
EXPORT_SYMBOL(blk_dump_rq_flags);
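
/*
 * Delayed-work handler shared by blk_delay_queue() and blk_run_queue_async():
 * re-runs the queue under the queue lock once the (possibly zero) delay
 * has expired.
 */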
static void blk_delay_work(struct work_struct *work)
{
        struct request_queue *q;

        q = container_of(work, struct request_queue, delay_work.work);
        spin_lock_irq(q->queue_lock);
        __blk_run_queue(q);
        spin_unlock_irq(q->queue_lock);
}

/**
 * blk_delay_queue - restart queueing after defined interval
 * @q:		The &struct request_queue in question
 * @msecs:	Delay in msecs
 *
 * Description:
 *   Sometimes queueing needs to be postponed for a little while, to allow
 *   resources to come back. This function will make sure that queueing is
 *   restarted around the specified time.
 */
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
        queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
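
/*
 * Illustrative sketch (not part of this file): a request_fn that runs out
 * of a per-device resource such as command slots can requeue and back off.
 * "mydev", mydev_get_slot() and mydev_issue() are hypothetical driver code:
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct mydev *dev = q->queuedata;
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!mydev_get_slot(dev)) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);	(retry in ~3 msecs)
 *				return;
 *			}
 *			mydev_issue(dev, rq);
 *		}
 *	}
 */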

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(struct request_queue *q)
{
        WARN_ON(!irqs_disabled());

        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(struct request_queue *q)
{
        __cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
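
/*
 * Illustrative sketch (not part of this file): a driver typically pairs
 * these from its issue and completion paths, with the queue lock held.
 * "mydev_ring_full" is hypothetical driver state:
 *
 *	if (mydev_ring_full(dev))
 *		blk_stop_queue(q);
 *
 * and later, once completions have freed up room:
 *
 *	blk_start_queue(q);
 */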

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blk_throtl_exit() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->timeout);
        cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 *    The device driver will be notified of an urgent request
 *    pending under the following conditions:
 *    1. The driver and the current scheduler support urgent request handling
 *    2. There is an urgent request pending in the scheduler
 *    3. There isn't already an urgent request in flight, meaning previously
 *       notified urgent request completed (!q->notified_urgent)
 */
void __blk_run_queue(struct request_queue *q)
{
        if (unlikely(blk_queue_stopped(q)))
                return;

        if (!q->notified_urgent &&
                q->elevator->type->ops.elevator_is_urgent_fn &&
                q->urgent_request_fn &&
                q->elevator->type->ops.elevator_is_urgent_fn(q)) {
                q->notified_urgent = true;
                q->urgent_request_fn(q);
        } else
                q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

/**
 * blk_run_queue_async - run a single device queue in workqueue context
 * @q:	The queue to run
 *
 * Description:
 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
 *    behalf.
 */
void blk_run_queue_async(struct request_queue *q)
{
        if (likely(!blk_queue_stopped(q))) {
                __cancel_delayed_work(&q->delay_work);
                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
        }
}
EXPORT_SYMBOL(blk_run_queue_async);

/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    May be used to restart queueing when a request has completed.
 */
void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
        kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

/**
 * blk_drain_queue - drain requests from request_queue
 * @q: queue to drain
 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 *
 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 * If not, only ELVPRIV requests are drained.  The caller is responsible
 * for ensuring that no new requests which need to be drained are queued.
 */
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
        while (true) {
                bool drain = false;
                int i;

                spin_lock_irq(q->queue_lock);

                elv_drain_elevator(q);
                if (drain_all)
                        blk_throtl_drain(q);

                /*
                 * This function might be called on a queue which failed
                 * driver init after queue creation.  Some drivers
                 * (e.g. fd) get unhappy in such cases.  Kick queue iff
                 * dispatch queue has something on it.
                 */
                if (!list_empty(&q->queue_head))
                        __blk_run_queue(q);

                drain |= q->rq.elvpriv;

                /*
                 * Unfortunately, requests are queued at and tracked from
                 * multiple places and there's no single counter which can
                 * be drained.  Check all the queues and counters.
                 */
                if (drain_all) {
                        drain |= !list_empty(&q->queue_head);
                        for (i = 0; i < 2; i++) {
                                drain |= q->rq.count[i];
                                drain |= q->in_flight[i];
                                drain |= !list_empty(&q->flush_queue[i]);
                        }
                }

                spin_unlock_irq(q->queue_lock);

                if (!drain)
                        break;
                msleep(10);
        }
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DEAD, drain all pending requests, destroy and put it.  All
 * future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
        spinlock_t *lock = q->queue_lock;

        /* mark @q DEAD, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

        spin_lock_irq(lock);
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        queue_flag_set(QUEUE_FLAG_DEAD, q);

        if (q->queue_lock != &q->__queue_lock)
                q->queue_lock = &q->__queue_lock;

        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);

        /*
         * Drain all requests queued before DEAD marking.  The caller might
         * be trying to tear down @q before its elevator is initialized, in
         * which case we don't want to call into draining.
         */
        if (q->elevator)
                blk_drain_queue(q, true);

        /* @q won't process any more requests, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);

        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
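
/*
 * Set up the request_list for @q: zero the counters, initialize the wait
 * queues and create the mempool that request allocation draws from.
 * Returns immediately if the pool already exists.
 */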
static int blk_init_free_list(struct request_queue *q)
{
        struct request_list *rl = &q->rq;

        if (unlikely(rl->rq_pool))
                return 0;

        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        rl->elvpriv = 0;
        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                mempool_free_slab, request_cachep, q->node);

        if (!rl->rq_pool)
                return -ENOMEM;

        return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
        return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
        struct request_queue *q;
        int err;

        q = kmem_cache_alloc_node(blk_requestq_cachep,
                                gfp_mask | __GFP_ZERO, node_id);
        if (!q)
                return NULL;

        q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
        if (q->id < 0)
                goto fail_q;

        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
        q->node = node_id;

        err = bdi_init(&q->backing_dev_info);
        if (err)
                goto fail_id;

        if (blk_throtl_init(q))
                goto fail_id;

        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
        INIT_LIST_HEAD(&q->flush_queue[0]);
        INIT_LIST_HEAD(&q->flush_queue[1]);
        INIT_LIST_HEAD(&q->flush_data_in_flight);
        INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);

        kobject_init(&q->kobj, &blk_queue_ktype);

        mutex_init(&q->sysfs_lock);
        spin_lock_init(&q->__queue_lock);

        /*
         * By default initialize queue_lock to internal lock and driver can
         * override it later if need be.
         */
        q->queue_lock = &q->__queue_lock;

        return q;

fail_id:
        ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
        kmem_cache_free(blk_requestq_cachep, q);
        return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sorts requests and coalesces adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue; this lock will also be taken from interrupt context, so irq
 *    disabling is needed for it.
 *
 *    Function returns a pointer to the initialized request queue, or %NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
        return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
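
/*
 * Illustrative sketch (not part of this file): typical driver pairing of
 * queue setup and teardown.  "mydev_request_fn" and "mydev->lock" are
 * hypothetical:
 *
 *	q = blk_init_queue(mydev_request_fn, &mydev->lock);
 *	if (!q)
 *		return -ENOMEM;
 *	q->queuedata = mydev;
 *
 * and at device removal / module unload:
 *
 *	blk_cleanup_queue(q);
 */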

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
        struct request_queue *uninit_q, *q;

        uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
        if (!uninit_q)
                return NULL;

        q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);

        return q;
}
EXPORT_SYMBOL(blk_init_queue_node);

struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
{
        if (!q)
                return NULL;

        if (blk_init_free_list(q))
                return NULL;

        q->request_fn		= rfn;
        q->prep_rq_fn		= NULL;
        q->unprep_rq_fn		= NULL;
        q->queue_flags		= QUEUE_FLAG_DEFAULT;

        /* Override internal queue lock with supplied lock pointer */
        if (lock)
                q->queue_lock		= lock;

        /*
         * This also sets hw/phys segments, boundary and size
         */
        blk_queue_make_request(q, blk_queue_bio);

        q->sg_reserved_size = INT_MAX;

        /*
         * all done
         */
        if (!elevator_init(q, NULL)) {
                blk_queue_congestion_threshold(q);
                return q;
        }

        return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
        if (likely(!blk_queue_dead(q))) {
                __blk_get_queue(q);
                return true;
        }

        return false;
}
EXPORT_SYMBOL(blk_get_queue);
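
/*
 * Release a request back to @q's mempool, dropping the elevator-private
 * data and the io_context reference pinned by @rq->elv.icq, if any.
 */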
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
        if (rq->cmd_flags & REQ_ELVPRIV) {
                elv_put_request(q, rq);
                if (rq->elv.icq)
                        put_io_context(rq->elv.icq->ioc);
        }

        mempool_free(rq, q->rq.rq_pool);
}
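
/*
 * Allocate a request from @q's mempool and, for REQ_ELVPRIV requests,
 * attach the elevator-private data and pin @icq's io_context.  Returns
 * NULL if either the mempool or the elevator allocation fails.
 */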
static struct request *
blk_alloc_request(struct request_queue *q, struct io_cq *icq,
                  unsigned int flags, gfp_t gfp_mask)
{
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

        if (!rq)
                return NULL;

        blk_rq_init(q, rq);

        rq->cmd_flags = flags | REQ_ALLOCED;

        if (flags & REQ_ELVPRIV) {
                rq->elv.icq = icq;
                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
                        mempool_free(rq, q->rq.rq_pool);
                        return NULL;
                }
                /* @rq->elv.icq holds on to io_context until @rq is freed */
                if (icq)
                        get_io_context(icq->ioc);
        }

        return rq;
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
        if (!ioc)
                return 0;

        /*
         * Make sure the process is able to allocate at least 1 request
         * even if the batch times out, otherwise we could theoretically
         * lose wakeups.
         */
        return ioc->nr_batch_requests == q->nr_batching ||
                (ioc->nr_batch_requests > 0
                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
        if (!ioc || ioc_batching(q, ioc))
                return;

        ioc->nr_batch_requests = q->nr_batching;
        ioc->last_waited = jiffies;
}
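
/*
 * Clear the congested and full state for the @sync request list once its
 * count has dropped back below the thresholds, waking any waiters.
 */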
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 726 | static void __freed_request(struct request_queue *q, int sync) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 727 | { | 
|  | 728 | struct request_list *rl = &q->rq; | 
|  | 729 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 730 | if (rl->count[sync] < queue_congestion_off_threshold(q)) | 
|  | 731 | blk_clear_queue_congested(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 732 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 733 | if (rl->count[sync] + 1 <= q->nr_requests) { | 
|  | 734 | if (waitqueue_active(&rl->wait[sync])) | 
|  | 735 | wake_up(&rl->wait[sync]); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 736 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 737 | blk_clear_queue_full(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 738 | } | 
|  | 739 | } | 
|  | 740 |  | 
|  | 741 | /* | 
|  | 742 | * A request has just been released.  Account for it, update the full and | 
|  | 743 | * congestion status, wake up any waiters.   Called under q->queue_lock. | 
|  | 744 | */ | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 745 | static void freed_request(struct request_queue *q, unsigned int flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 746 | { | 
|  | 747 | struct request_list *rl = &q->rq; | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 748 | int sync = rw_is_sync(flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 749 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 750 | rl->count[sync]--; | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 751 | if (flags & REQ_ELVPRIV) | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 752 | rl->elvpriv--; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 753 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 754 | __freed_request(q, sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 755 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 756 | if (unlikely(rl->starved[sync ^ 1])) | 
|  | 757 | __freed_request(q, sync ^ 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 758 | } | 
|  | 759 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 760 | /* | 
| Mike Snitzer | 9d5a4e9 | 2011-02-11 11:05:46 +0100 | [diff] [blame] | 761 | * Determine if elevator data should be initialized when allocating the | 
|  | 762 | * request associated with @bio. | 
|  | 763 | */ | 
|  | 764 | static bool blk_rq_should_init_elevator(struct bio *bio) | 
|  | 765 | { | 
|  | 766 | if (!bio) | 
|  | 767 | return true; | 
|  | 768 |  | 
|  | 769 | /* | 
|  | 770 | * Flush requests do not use the elevator so skip initialization. | 
|  | 771 | * This allows a request to share the flush and elevator data. | 
|  | 772 | */ | 
|  | 773 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) | 
|  | 774 | return false; | 
|  | 775 |  | 
|  | 776 | return true; | 
|  | 777 | } | 
|  | 778 |  | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 779 | /** | 
|  | 780 | * get_request - get a free request | 
|  | 781 | * @q: request_queue to allocate request from | 
|  | 782 | * @rw_flags: RW and SYNC flags | 
|  | 783 | * @bio: bio to allocate request for (can be %NULL) | 
|  | 784 | * @gfp_mask: allocation mask | 
|  | 785 | * | 
|  | 786 | * Get a free request from @q.  This function may fail under memory | 
|  | 787 | * pressure or if @q is dead. | 
|  | 788 | * | 
|  | 789 | * Must be callled with @q->queue_lock held and, | 
|  | 790 | * Returns %NULL on failure, with @q->queue_lock held. | 
|  | 791 | * Returns !%NULL on success, with @q->queue_lock *not held*. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 792 | */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 793 | static struct request *get_request(struct request_queue *q, int rw_flags, | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 794 | struct bio *bio, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 795 | { | 
|  | 796 | struct request *rq = NULL; | 
|  | 797 | struct request_list *rl = &q->rq; | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 798 | struct elevator_type *et; | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 799 | struct io_context *ioc; | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 800 | struct io_cq *icq = NULL; | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 801 | const bool is_sync = rw_is_sync(rw_flags) != 0; | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 802 | bool retried = false; | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 803 | int may_queue; | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 804 | retry: | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 805 | et = q->elevator->type; | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 806 | ioc = current->io_context; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 807 |  | 
| Tejun Heo | 34f6055 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 808 | if (unlikely(blk_queue_dead(q))) | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 809 | return NULL; | 
|  | 810 |  | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 811 | may_queue = elv_may_queue(q, rw_flags); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 812 | if (may_queue == ELV_MQUEUE_NO) | 
|  | 813 | goto rq_starved; | 
|  | 814 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 815 | if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { | 
|  | 816 | if (rl->count[is_sync]+1 >= q->nr_requests) { | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 817 | /* | 
|  | 818 | * We want ioc to record batching state.  If it's | 
|  | 819 | * not already there, creating a new one requires | 
|  | 820 | * dropping queue_lock, which in turn requires | 
|  | 821 | * retesting conditions to avoid queue hang. | 
|  | 822 | */ | 
|  | 823 | if (!ioc && !retried) { | 
|  | 824 | spin_unlock_irq(q->queue_lock); | 
|  | 825 | create_io_context(current, gfp_mask, q->node); | 
|  | 826 | spin_lock_irq(q->queue_lock); | 
|  | 827 | retried = true; | 
|  | 828 | goto retry; | 
|  | 829 | } | 
|  | 830 |  | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 831 | /* | 
|  | 832 | * The queue will fill after this allocation, so set | 
|  | 833 | * it as full, and mark this process as "batching". | 
|  | 834 | * This process will be allowed to complete a batch of | 
|  | 835 | * requests; others will be blocked. | 
|  | 836 | */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 837 | if (!blk_queue_full(q, is_sync)) { | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 838 | ioc_set_batching(q, ioc); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 839 | blk_set_queue_full(q, is_sync); | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 840 | } else { | 
|  | 841 | if (may_queue != ELV_MQUEUE_MUST | 
|  | 842 | && !ioc_batching(q, ioc)) { | 
|  | 843 | /* | 
|  | 844 | * The queue is full and the allocating | 
|  | 845 | * process is not a "batcher", and not | 
|  | 846 | * exempted by the IO scheduler | 
|  | 847 | */ | 
|  | 848 | goto out; | 
|  | 849 | } | 
|  | 850 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 851 | } | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 852 | blk_set_queue_congested(q, is_sync); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | } | 
|  | 854 |  | 
| Jens Axboe | 082cf69 | 2005-06-28 16:35:11 +0200 | [diff] [blame] | 855 | /* | 
|  | 856 | * Only allow batching queuers to allocate up to 50% over the defined | 
|  | 857 | * limit of requests; otherwise we could have thousands of requests | 
|  | 858 | * allocated with any setting of ->nr_requests | 
|  | 859 | */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 860 | if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) | 
| Jens Axboe | 082cf69 | 2005-06-28 16:35:11 +0200 | [diff] [blame] | 861 | goto out; | 
| Hugh Dickins | fd782a4 | 2005-06-29 15:15:40 +0100 | [diff] [blame] | 862 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 863 | rl->count[is_sync]++; | 
|  | 864 | rl->starved[is_sync] = 0; | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 865 |  | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 866 | /* | 
|  | 867 | * Decide whether the new request will be managed by the elevator.  If | 
|  | 868 | * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will | 
|  | 869 | * prevent the current elevator from being destroyed until the new | 
|  | 870 | * request is freed.  This guarantees icqs won't be destroyed and | 
|  | 871 | * makes creating new ones safe. | 
|  | 872 | * | 
|  | 873 | * Also, lookup icq while holding queue_lock.  If it doesn't exist, | 
|  | 874 | * it will be created after releasing queue_lock. | 
|  | 875 | */ | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 876 | if (blk_rq_should_init_elevator(bio) && | 
|  | 877 | !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) { | 
|  | 878 | rw_flags |= REQ_ELVPRIV; | 
|  | 879 | rl->elvpriv++; | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 880 | if (et->icq_cache && ioc) | 
|  | 881 | icq = ioc_lookup_icq(ioc, q); | 
| Mike Snitzer | 9d5a4e9 | 2011-02-11 11:05:46 +0100 | [diff] [blame] | 882 | } | 
| Tejun Heo | cb98fc8 | 2005-10-28 08:29:39 +0200 | [diff] [blame] | 883 |  | 
| Jens Axboe | f253b86 | 2010-10-24 22:06:02 +0200 | [diff] [blame] | 884 | if (blk_queue_io_stat(q)) | 
|  | 885 | rw_flags |= REQ_IO_STAT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 886 | spin_unlock_irq(q->queue_lock); | 
|  | 887 |  | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 888 | /* create icq if missing */ | 
| Shaohua Li | 05c30b9 | 2012-01-19 09:20:10 +0100 | [diff] [blame] | 889 | if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) { | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 890 | icq = ioc_create_icq(q, gfp_mask); | 
| Shaohua Li | 05c30b9 | 2012-01-19 09:20:10 +0100 | [diff] [blame] | 891 | if (!icq) | 
|  | 892 | goto fail_icq; | 
|  | 893 | } | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 894 |  | 
| Shaohua Li | 05c30b9 | 2012-01-19 09:20:10 +0100 | [diff] [blame] | 895 | rq = blk_alloc_request(q, icq, rw_flags, gfp_mask); | 
| Tejun Heo | f1f8cc9 | 2011-12-14 00:33:42 +0100 | [diff] [blame] | 896 |  | 
| Shaohua Li | 05c30b9 | 2012-01-19 09:20:10 +0100 | [diff] [blame] | 897 | fail_icq: | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 898 | if (unlikely(!rq)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | /* | 
|  | 900 | * Allocation failed presumably due to memory. Undo anything | 
|  | 901 | * we might have messed up. | 
|  | 902 | * | 
|  | 903 | * Allocating task should really be put onto the front of the | 
|  | 904 | * wait queue, but this is pretty rare. | 
|  | 905 | */ | 
|  | 906 | spin_lock_irq(q->queue_lock); | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 907 | freed_request(q, rw_flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 |  | 
|  | 909 | /* | 
|  | 910 | * in the very unlikely event that allocation failed and no | 
|  | 911 | * requests for this direction were pending, mark us starved | 
|  | 912 | * so that freeing of a request in the other direction will | 
|  | 913 | * notice us. another possible fix would be to split the | 
|  | 914 | * rq mempool into READ and WRITE | 
|  | 915 | */ | 
|  | 916 | rq_starved: | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 917 | if (unlikely(rl->count[is_sync] == 0)) | 
|  | 918 | rl->starved[is_sync] = 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 919 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 | goto out; | 
|  | 921 | } | 
|  | 922 |  | 
| Jens Axboe | 88ee5ef | 2005-11-12 11:09:12 +0100 | [diff] [blame] | 923 | /* | 
|  | 924 | * ioc may be NULL here, and ioc_batching will be false. That's | 
|  | 925 | * OK, if the queue is under the request limit then requests need | 
|  | 926 | * not count toward the nr_batch_requests limit. There will always | 
|  | 927 | * be some limit enforced by BLK_BATCH_TIME. | 
|  | 928 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | if (ioc_batching(q, ioc)) | 
|  | 930 | ioc->nr_batch_requests--; | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 931 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 932 | trace_block_getrq(q, bio, rw_flags & 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 933 | out: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 | return rq; | 
|  | 935 | } | 
|  | 936 |  | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 937 | /** | 
|  | 938 | * get_request_wait - get a free request with retry | 
|  | 939 | * @q: request_queue to allocate request from | 
|  | 940 | * @rw_flags: RW and SYNC flags | 
|  | 941 | * @bio: bio to allocate request for (can be %NULL) | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 942 | * | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 943 | * Get a free request from @q.  This function keeps retrying under memory | 
|  | 944 | * pressure and fails iff @q is dead. | 
|  | 945 | * | 
|  | 946 | * Must be called with @q->queue_lock held. | 
|  | 947 | * Returns %NULL on failure, with @q->queue_lock held. | 
|  | 948 | * Returns !%NULL on success, with @q->queue_lock *not held*. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 950 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 951 | struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | { | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 953 | const bool is_sync = rw_is_sync(rw_flags) != 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 | struct request *rq; | 
|  | 955 |  | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 956 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 957 | while (!rq) { | 
|  | 958 | DEFINE_WAIT(wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | struct request_list *rl = &q->rq; | 
|  | 960 |  | 
| Tejun Heo | 34f6055 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 961 | if (unlikely(blk_queue_dead(q))) | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 962 | return NULL; | 
|  | 963 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 964 | prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | TASK_UNINTERRUPTIBLE); | 
|  | 966 |  | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 967 | trace_block_sleeprq(q, bio, rw_flags & 1); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 |  | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 969 | spin_unlock_irq(q->queue_lock); | 
|  | 970 | io_schedule(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 |  | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 972 | /* | 
|  | 973 | * After sleeping, we become a "batching" process and | 
|  | 974 | * will be able to allocate at least one request, and | 
|  | 975 | * up to a big batch of them for a small period of time. | 
|  | 976 | * See ioc_batching, ioc_set_batching | 
|  | 977 | */ | 
| Tejun Heo | f2dbd76 | 2011-12-14 00:33:40 +0100 | [diff] [blame] | 978 | create_io_context(current, GFP_NOIO, q->node); | 
|  | 979 | ioc_set_batching(q, current->io_context); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 980 |  | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 981 | spin_lock_irq(q->queue_lock); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 982 | finish_wait(&rl->wait[is_sync], &wait); | 
| Zhang, Yanmin | 05caf8d | 2008-05-22 15:13:29 +0200 | [diff] [blame] | 983 |  | 
|  | 984 | rq = get_request(q, rw_flags, bio, GFP_NOIO); | 
|  | 985 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 986 |  | 
|  | 987 | return rq; | 
|  | 988 | } | 
|  | 989 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 990 | struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | { | 
|  | 992 | struct request *rq; | 
|  | 993 |  | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 994 | spin_lock_irq(q->queue_lock); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 995 | if (gfp_mask & __GFP_WAIT) | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 996 | rq = get_request_wait(q, rw, NULL); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 997 | else | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 998 | rq = get_request(q, rw, NULL, gfp_mask); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 999 | if (!rq) | 
|  | 1000 | spin_unlock_irq(q->queue_lock); | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1001 | /* q->queue_lock is unlocked at this point */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 |  | 
|  | 1003 | return rq; | 
|  | 1004 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | EXPORT_SYMBOL(blk_get_request); | 
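|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch, not part of this file): a driver | 
|  |  |  * pairing blk_get_request() with blk_put_request().  With __GFP_WAIT | 
|  |  |  * in the mask, the call sleeps until a request frees up and returns | 
|  |  |  * NULL only if @q is dead.  The BLOCK_PC setup and my_cdb[] below | 
|  |  |  * are hypothetical. | 
|  |  |  */ | 
|  |  | static int example_send_pc_command(struct request_queue *q, | 
|  |  | 				   struct gendisk *disk, u8 *my_cdb) | 
|  |  | { | 
|  |  | 	struct request *rq; | 
|  |  | 	int err; | 
|  |  |  | 
|  |  | 	rq = blk_get_request(q, READ, GFP_KERNEL);	/* may sleep */ | 
|  |  | 	if (!rq) | 
|  |  | 		return -ENODEV;				/* queue is dead */ | 
|  |  |  | 
|  |  | 	rq->cmd_type = REQ_TYPE_BLOCK_PC; | 
|  |  | 	memcpy(rq->cmd, my_cdb, 6); | 
|  |  | 	rq->cmd_len = 6; | 
|  |  | 	rq->timeout = 60 * HZ; | 
|  |  |  | 
|  |  | 	err = blk_execute_rq(q, disk, rq, 0);	/* waits for completion */ | 
|  |  | 	blk_put_request(rq); | 
|  |  | 	return err; | 
|  |  | } | 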
|  | 1006 |  | 
|  | 1007 | /** | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1008 | * blk_make_request - given a bio, allocate a corresponding struct request. | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1009 | * @q: target request queue | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1010 | * @bio:  The bio describing the memory mappings that will be submitted for IO. | 
|  | 1011 | *        It may be a chained-bio properly constructed by block/bio layer. | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 1012 | * @gfp_mask: gfp flags to be used for memory allocation | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1013 | * | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1014 | * blk_make_request is the parallel of generic_make_request for BLOCK_PC | 
|  | 1015 | * type commands, where the struct request needs to be further initialized | 
|  | 1016 | * by the caller. It is passed a &struct bio, which describes the memory | 
|  | 1017 | * info of the I/O transfer. | 
|  | 1018 | * | 
|  | 1019 | * The caller of blk_make_request must make sure that bi_io_vec is set to | 
|  | 1020 | * describe the memory buffers, that bio_data_dir() will return the needed | 
|  | 1021 | * direction of the request, and that all bios in the passed bio-chain are | 
|  | 1022 | * set up accordingly. | 
|  | 1023 | * | 
|  | 1024 | * If called under non-sleepable conditions, the mapped bio buffers must not | 
|  | 1025 | * need bouncing; allocate them with a mask or flags appropriate for the | 
|  | 1026 | * target device, otherwise the call to blk_queue_bounce will | 
|  | 1027 | * BUG. | 
| Jens Axboe | 53674ac | 2009-05-19 19:52:35 +0200 | [diff] [blame] | 1028 | * | 
|  | 1029 | * WARNING: When allocating/cloning a bio-chain, careful consideration should be | 
|  | 1030 | * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for | 
|  | 1031 | * anything but the first bio in the chain. Otherwise you risk waiting for IO | 
|  | 1032 | * completion of a bio that hasn't been submitted yet, thus resulting in a | 
|  | 1033 | * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead | 
|  | 1034 | * of bio_alloc(), as that avoids the mempool deadlock. | 
|  | 1035 | * If possible a big IO should be split into smaller parts when allocation | 
|  | 1036 | * fails. Partial allocation should not be an error, or you risk a live-lock. | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1037 | */ | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1038 | struct request *blk_make_request(struct request_queue *q, struct bio *bio, | 
|  | 1039 | gfp_t gfp_mask) | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1040 | { | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1041 | struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); | 
|  | 1042 |  | 
|  | 1043 | if (unlikely(!rq)) | 
|  | 1044 | return ERR_PTR(-ENOMEM); | 
|  | 1045 |  | 
|  | 1046 | for_each_bio(bio) { | 
|  | 1047 | struct bio *bounce_bio = bio; | 
|  | 1048 | int ret; | 
|  | 1049 |  | 
|  | 1050 | blk_queue_bounce(q, &bounce_bio); | 
|  | 1051 | ret = blk_rq_append_bio(q, rq, bounce_bio); | 
|  | 1052 | if (unlikely(ret)) { | 
|  | 1053 | blk_put_request(rq); | 
|  | 1054 | return ERR_PTR(ret); | 
|  | 1055 | } | 
|  | 1056 | } | 
|  | 1057 |  | 
|  | 1058 | return rq; | 
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1059 | } | 
| Boaz Harrosh | 79eb63e | 2009-05-17 18:57:15 +0300 | [diff] [blame] | 1060 | EXPORT_SYMBOL(blk_make_request); | 
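|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch): mapping an already built bio chain to | 
|  |  |  * a request with blk_make_request(), the way a BLOCK_PC user such as | 
|  |  |  * an OSD-style driver might.  example_done() is hypothetical. | 
|  |  |  */ | 
|  |  | static int example_submit_bio_chain(struct request_queue *q, struct bio *bio, | 
|  |  | 				    rq_end_io_fn *example_done) | 
|  |  | { | 
|  |  | 	struct request *rq = blk_make_request(q, bio, GFP_KERNEL); | 
|  |  |  | 
|  |  | 	if (IS_ERR(rq)) | 
|  |  | 		return PTR_ERR(rq); | 
|  |  |  | 
|  |  | 	rq->cmd_type = REQ_TYPE_BLOCK_PC; | 
|  |  | 	blk_execute_rq_nowait(q, NULL, rq, 0, example_done); | 
|  |  | 	return 0; | 
|  |  | } | 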
| Jens Axboe | dc72ef4 | 2006-07-20 14:54:05 +0200 | [diff] [blame] | 1061 |  | 
|  | 1062 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1063 | * blk_requeue_request - put a request back on queue | 
|  | 1064 | * @q:		request queue where request should be inserted | 
|  | 1065 | * @rq:		request to be inserted | 
|  | 1066 | * | 
|  | 1067 | * Description: | 
|  | 1068 | *    Drivers often keep queueing requests until the hardware cannot accept | 
|  | 1069 | *    more. When that condition happens, we need to put the request back | 
|  | 1070 | *    on the queue. Must be called with the queue lock held. | 
|  | 1071 | */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1072 | void blk_requeue_request(struct request_queue *q, struct request *rq) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | { | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 1074 | blk_delete_timer(rq); | 
|  | 1075 | blk_clear_rq_complete(rq); | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 1076 | trace_block_rq_requeue(q, rq); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 1077 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | if (blk_rq_tagged(rq)) | 
|  | 1079 | blk_queue_end_tag(q, rq); | 
|  | 1080 |  | 
| James Bottomley | ba396a6 | 2009-05-27 14:17:08 +0200 | [diff] [blame] | 1081 | BUG_ON(blk_queued_rq(rq)); | 
|  | 1082 |  | 
| Tatyana Brokhman | fd79902 | 2013-04-11 14:57:15 +0300 | [diff] [blame] | 1083 | if (rq->cmd_flags & REQ_URGENT) { | 
|  | 1084 | /* | 
|  | 1085 | * It's not compliant with the design to re-insert | 
|  | 1086 | * urgent requests. We want to be able to track this | 
|  | 1087 | * down. | 
|  | 1088 | */ | 
| Tatyana Brokhman | 7f9b9bf | 2013-05-16 14:36:58 +0300 | [diff] [blame] | 1089 | pr_debug("%s(): requeueing an URGENT request", __func__); | 
| Tatyana Brokhman | fd79902 | 2013-04-11 14:57:15 +0300 | [diff] [blame] | 1090 | WARN_ON(!q->dispatched_urgent); | 
|  | 1091 | q->dispatched_urgent = false; | 
|  | 1092 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | elv_requeue_request(q, rq); | 
|  | 1094 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | EXPORT_SYMBOL(blk_requeue_request); | 
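|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch): a request_fn putting a request back | 
|  |  |  * when the hardware is busy.  example_hw_busy() is hypothetical; the | 
|  |  |  * queue lock is already held inside a request_fn, as required here. | 
|  |  |  */ | 
|  |  | static void example_request_fn(struct request_queue *q) | 
|  |  | { | 
|  |  | 	struct request *rq; | 
|  |  |  | 
|  |  | 	while ((rq = blk_fetch_request(q)) != NULL) { | 
|  |  | 		if (example_hw_busy()) { | 
|  |  | 			blk_requeue_request(q, rq); | 
|  |  | 			blk_stop_queue(q);	/* restart from the IRQ handler */ | 
|  |  | 			break; | 
|  |  | 		} | 
|  |  | 		/* ... hand rq to the hardware ... */ | 
|  |  | 	} | 
|  |  | } | 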
|  | 1096 |  | 
| Tatyana Brokhman | 57d8019 | 2012-12-04 15:54:43 +0200 | [diff] [blame] | 1097 | /** | 
|  | 1098 | * blk_reinsert_request() - Insert a request back to the scheduler | 
|  | 1099 | * @q:		request queue | 
|  | 1100 | * @rq:		request to be inserted | 
|  | 1101 | * | 
|  | 1102 | * This function inserts the request back to the scheduler as if | 
|  | 1103 | * it had never been dispatched. | 
|  | 1104 | * | 
|  | 1105 | * Return: 0 on success, error code on fail | 
|  | 1106 | */ | 
|  | 1107 | int blk_reinsert_request(struct request_queue *q, struct request *rq) | 
|  | 1108 | { | 
|  | 1109 | if (unlikely(!rq) || unlikely(!q)) | 
|  | 1110 | return -EIO; | 
|  | 1111 |  | 
|  | 1112 | blk_delete_timer(rq); | 
|  | 1113 | blk_clear_rq_complete(rq); | 
|  | 1114 | trace_block_rq_requeue(q, rq); | 
|  | 1115 |  | 
|  | 1116 | if (blk_rq_tagged(rq)) | 
|  | 1117 | blk_queue_end_tag(q, rq); | 
|  | 1118 |  | 
|  | 1119 | BUG_ON(blk_queued_rq(rq)); | 
| Tatyana Brokhman | fd79902 | 2013-04-11 14:57:15 +0300 | [diff] [blame] | 1120 | if (rq->cmd_flags & REQ_URGENT) { | 
|  | 1121 | /* | 
|  | 1122 | * It's not compliant with the design to re-insert | 
|  | 1123 | * urgent requests. We want to be able to track this | 
|  | 1124 | * down. | 
|  | 1125 | */ | 
| Tatyana Brokhman | 7f9b9bf | 2013-05-16 14:36:58 +0300 | [diff] [blame] | 1126 | pr_debug("%s(): reinserting an URGENT request", __func__); | 
| Tatyana Brokhman | fd79902 | 2013-04-11 14:57:15 +0300 | [diff] [blame] | 1127 | WARN_ON(!q->dispatched_urgent); | 
|  | 1128 | q->dispatched_urgent = false; | 
|  | 1129 | } | 
| Tatyana Brokhman | 57d8019 | 2012-12-04 15:54:43 +0200 | [diff] [blame] | 1130 |  | 
|  | 1131 | return elv_reinsert_request(q, rq); | 
|  | 1132 | } | 
|  | 1133 | EXPORT_SYMBOL(blk_reinsert_request); | 
|  | 1134 |  | 
|  | 1135 | /** | 
|  | 1136 | * blk_reinsert_req_sup() - check whether the scheduler supports | 
|  | 1137 | *          reinsertion of requests | 
|  | 1138 | * @q:		request queue | 
|  | 1139 | * | 
|  | 1140 | * Returns %true if the current scheduler supports reinserting | 
|  | 1141 | * requests, %false otherwise. | 
|  | 1142 | */ | 
|  | 1143 | bool blk_reinsert_req_sup(struct request_queue *q) | 
|  | 1144 | { | 
|  | 1145 | if (unlikely(!q)) | 
|  | 1146 | return false; | 
|  | 1147 | return q->elevator->type->ops.elevator_reinsert_req_fn ? true : false; | 
|  | 1148 | } | 
|  | 1149 | EXPORT_SYMBOL(blk_reinsert_req_sup); | 
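|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch): preferring full reinsertion when the | 
|  |  |  * scheduler supports it, falling back to a plain requeue otherwise. | 
|  |  |  * Like blk_requeue_request(), this runs under the queue lock. | 
|  |  |  */ | 
|  |  | static void example_give_back(struct request_queue *q, struct request *rq) | 
|  |  | { | 
|  |  | 	if (blk_reinsert_req_sup(q)) | 
|  |  | 		blk_reinsert_request(q, rq);	/* back into the scheduler */ | 
|  |  | 	else | 
|  |  | 		blk_requeue_request(q, rq);	/* back onto the dispatch queue */ | 
|  |  | } | 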
|  | 1150 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1151 | static void add_acct_request(struct request_queue *q, struct request *rq, | 
|  | 1152 | int where) | 
|  | 1153 | { | 
|  | 1154 | drive_stat_acct(rq, 1); | 
| Jens Axboe | 7eaceac | 2011-03-10 08:52:07 +0100 | [diff] [blame] | 1155 | __elv_add_request(q, rq, where); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1156 | } | 
|  | 1157 |  | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1158 | static void part_round_stats_single(int cpu, struct hd_struct *part, | 
|  | 1159 | unsigned long now) | 
|  | 1160 | { | 
|  | 1161 | if (now == part->stamp) | 
|  | 1162 | return; | 
|  | 1163 |  | 
| Nikanth Karthikesan | 316d315 | 2009-10-06 20:16:55 +0200 | [diff] [blame] | 1164 | if (part_in_flight(part)) { | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1165 | __part_stat_add(cpu, part, time_in_queue, | 
| Nikanth Karthikesan | 316d315 | 2009-10-06 20:16:55 +0200 | [diff] [blame] | 1166 | part_in_flight(part) * (now - part->stamp)); | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1167 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); | 
|  | 1168 | } | 
|  | 1169 | part->stamp = now; | 
|  | 1170 | } | 
|  | 1171 |  | 
|  | 1172 | /** | 
| Randy Dunlap | 496aa8a | 2008-10-16 07:46:23 +0200 | [diff] [blame] | 1173 | * part_round_stats() - Round off the performance stats on a struct disk_stats. | 
|  | 1174 | * @cpu: cpu number for stats access | 
|  | 1175 | * @part: target partition | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | * | 
|  | 1177 | * The average IO queue length and utilisation statistics are maintained | 
|  | 1178 | * by observing the current state of the queue length and the amount of | 
|  | 1179 | * time it has been in this state. | 
|  | 1180 | * | 
|  | 1181 | * Normally, that accounting is done on IO completion, but that can result | 
|  | 1182 | * in more than a second's worth of IO being accounted for within any one | 
|  | 1183 | * second, leading to >100% utilisation.  To deal with that, we call this | 
|  | 1184 | * function to do a round-off before returning the results when reading | 
|  | 1185 | * /proc/diskstats.  This accounts immediately for all queue usage up to | 
|  | 1186 | * the current jiffies and restarts the counters again. | 
|  | 1187 | */ | 
| Tejun Heo | c995905 | 2008-08-25 19:47:21 +0900 | [diff] [blame] | 1188 | void part_round_stats(int cpu, struct hd_struct *part) | 
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1189 | { | 
|  | 1190 | unsigned long now = jiffies; | 
|  | 1191 |  | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1192 | if (part->partno) | 
|  | 1193 | part_round_stats_single(cpu, &part_to_disk(part)->part0, now); | 
|  | 1194 | part_round_stats_single(cpu, part, now); | 
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1195 | } | 
| Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 1196 | EXPORT_SYMBOL_GPL(part_round_stats); | 
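|  |  |  | 
|  |  | /* | 
|  |  |  * Example: the canonical caller pattern, as used when reading | 
|  |  |  * /proc/diskstats.  part_stat_lock() pins a CPU for the per-cpu | 
|  |  |  * counters and returns its number. | 
|  |  |  */ | 
|  |  | static void example_round_stats(struct hd_struct *part) | 
|  |  | { | 
|  |  | 	int cpu = part_stat_lock(); | 
|  |  |  | 
|  |  | 	part_round_stats(cpu, part); | 
|  |  | 	part_stat_unlock(); | 
|  |  | } | 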
| Jerome Marchand | 6f2576a | 2008-02-08 11:04:35 +0100 | [diff] [blame] | 1197 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | /* | 
|  | 1199 | * queue lock must be held | 
|  | 1200 | */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1201 | void __blk_put_request(struct request_queue *q, struct request *req) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | if (unlikely(!q)) | 
|  | 1204 | return; | 
|  | 1205 | if (unlikely(--req->ref_count)) | 
|  | 1206 | return; | 
|  | 1207 |  | 
| Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 1208 | elv_completed_request(q, req); | 
|  | 1209 |  | 
| Boaz Harrosh | 1cd96c2 | 2009-03-24 12:35:07 +0100 | [diff] [blame] | 1210 | /* this is a bio leak */ | 
|  | 1211 | WARN_ON(req->bio != NULL); | 
|  | 1212 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1213 | /* | 
|  | 1214 | * Request may not have originated from ll_rw_blk. If not, | 
|  | 1215 | * it didn't come out of our reserved rq pools | 
|  | 1216 | */ | 
| Jens Axboe | 49171e5 | 2006-08-10 08:59:11 +0200 | [diff] [blame] | 1217 | if (req->cmd_flags & REQ_ALLOCED) { | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 1218 | unsigned int flags = req->cmd_flags; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1219 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | BUG_ON(!list_empty(&req->queuelist)); | 
| Jens Axboe | 9817064 | 2006-07-28 09:23:08 +0200 | [diff] [blame] | 1221 | BUG_ON(!hlist_unhashed(&req->hash)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 |  | 
|  | 1223 | blk_free_request(q, req); | 
| Tejun Heo | 75eb6c3 | 2011-10-19 14:31:22 +0200 | [diff] [blame] | 1224 | freed_request(q, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | } | 
|  | 1226 | } | 
| Mike Christie | 6e39b69 | 2005-11-11 05:30:24 -0600 | [diff] [blame] | 1227 | EXPORT_SYMBOL_GPL(__blk_put_request); | 
|  | 1228 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | void blk_put_request(struct request *req) | 
|  | 1230 | { | 
| Tejun Heo | 8922e16 | 2005-10-20 16:23:44 +0200 | [diff] [blame] | 1231 | unsigned long flags; | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1232 | struct request_queue *q = req->q; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 |  | 
| FUJITA Tomonori | 52a93ba | 2008-07-15 21:21:45 +0200 | [diff] [blame] | 1234 | spin_lock_irqsave(q->queue_lock, flags); | 
|  | 1235 | __blk_put_request(q, req); | 
|  | 1236 | spin_unlock_irqrestore(q->queue_lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | EXPORT_SYMBOL(blk_put_request); | 
|  | 1239 |  | 
| Christoph Hellwig | 66ac028 | 2010-06-18 16:59:42 +0200 | [diff] [blame] | 1240 | /** | 
|  | 1241 | * blk_add_request_payload - add a payload to a request | 
|  | 1242 | * @rq: request to update | 
|  | 1243 | * @page: page backing the payload | 
|  | 1244 | * @len: length of the payload. | 
|  | 1245 | * | 
|  | 1246 | * This allows a block driver to add a payload to an already submitted | 
|  | 1247 | * request at a later point.  The driver needs to take care of freeing the | 
|  | 1248 | * payload itself. | 
|  | 1249 | * | 
|  | 1250 | * Note that this is a quite horrible hack and nothing but handling of | 
|  | 1251 | * discard requests should ever use it. | 
|  | 1252 | */ | 
|  | 1253 | void blk_add_request_payload(struct request *rq, struct page *page, | 
|  | 1254 | unsigned int len) | 
|  | 1255 | { | 
|  | 1256 | struct bio *bio = rq->bio; | 
|  | 1257 |  | 
|  | 1258 | bio->bi_io_vec->bv_page = page; | 
|  | 1259 | bio->bi_io_vec->bv_offset = 0; | 
|  | 1260 | bio->bi_io_vec->bv_len = len; | 
|  | 1261 |  | 
|  | 1262 | bio->bi_size = len; | 
|  | 1263 | bio->bi_vcnt = 1; | 
|  | 1264 | bio->bi_phys_segments = 1; | 
|  | 1265 |  | 
|  | 1266 | rq->__data_len = rq->resid_len = len; | 
|  | 1267 | rq->nr_phys_segments = 1; | 
|  | 1268 | rq->buffer = bio_data(bio); | 
|  | 1269 | } | 
|  | 1270 | EXPORT_SYMBOL_GPL(blk_add_request_payload); | 
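|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch): how a driver preparing a discard might | 
|  |  |  * attach a zeroed page as the payload, roughly as the SCSI disk driver | 
|  |  |  * does.  Error handling is elided. | 
|  |  |  */ | 
|  |  | static void example_prep_discard(struct request *rq, unsigned int len) | 
|  |  | { | 
|  |  | 	struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO); | 
|  |  |  | 
|  |  | 	if (page) | 
|  |  | 		blk_add_request_payload(rq, page, len); | 
|  |  | 	/* the driver must free the page once the request completes */ | 
|  |  | } | 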
|  | 1271 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1272 | static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, | 
|  | 1273 | struct bio *bio) | 
|  | 1274 | { | 
|  | 1275 | const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 
|  | 1276 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1277 | if (!ll_back_merge_fn(q, req, bio)) | 
|  | 1278 | return false; | 
|  | 1279 |  | 
|  | 1280 | trace_block_bio_backmerge(q, bio); | 
|  | 1281 |  | 
|  | 1282 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | 
|  | 1283 | blk_rq_set_mixed_merge(req); | 
|  | 1284 |  | 
|  | 1285 | req->biotail->bi_next = bio; | 
|  | 1286 | req->biotail = bio; | 
|  | 1287 | req->__data_len += bio->bi_size; | 
|  | 1288 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | 
|  | 1289 |  | 
|  | 1290 | drive_stat_acct(req, 0); | 
|  | 1291 | return true; | 
|  | 1292 | } | 
|  | 1293 |  | 
|  | 1294 | static bool bio_attempt_front_merge(struct request_queue *q, | 
|  | 1295 | struct request *req, struct bio *bio) | 
|  | 1296 | { | 
|  | 1297 | const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1298 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1299 | if (!ll_front_merge_fn(q, req, bio)) | 
|  | 1300 | return false; | 
|  | 1301 |  | 
|  | 1302 | trace_block_bio_frontmerge(q, bio); | 
|  | 1303 |  | 
|  | 1304 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | 
|  | 1305 | blk_rq_set_mixed_merge(req); | 
|  | 1306 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1307 | bio->bi_next = req->bio; | 
|  | 1308 | req->bio = bio; | 
|  | 1309 |  | 
|  | 1310 | /* | 
|  | 1311 | * bio_data() may not be valid. If the low level driver said | 
|  | 1312 | * it didn't need a bounce buffer then it had better | 
|  | 1313 | * not touch req->buffer either... | 
|  | 1314 | */ | 
|  | 1315 | req->buffer = bio_data(bio); | 
|  | 1316 | req->__sector = bio->bi_sector; | 
|  | 1317 | req->__data_len += bio->bi_size; | 
|  | 1318 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | 
|  | 1319 |  | 
|  | 1320 | drive_stat_acct(req, 0); | 
|  | 1321 | return true; | 
|  | 1322 | } | 
|  | 1323 |  | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1324 | /** | 
|  | 1325 | * attempt_plug_merge - try to merge with %current's plugged list | 
|  | 1326 | * @q: request_queue new bio is being queued at | 
|  | 1327 | * @bio: new bio being queued | 
|  | 1328 | * @request_count: out parameter for number of traversed plugged requests | 
|  | 1329 | * | 
|  | 1330 | * Determine whether @bio being queued on @q can be merged with a request | 
|  | 1331 | * on %current's plugged list.  Returns %true if merge was successful, | 
|  | 1332 | * otherwise %false. | 
|  | 1333 | * | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1334 | * Plugging coalesces IOs from the same issuer for the same purpose without | 
|  | 1335 | * going through @q->queue_lock.  As such it's more of an issuing mechanism | 
|  | 1336 | * than scheduling, and the request, while it may have elvpriv data, is not | 
|  | 1337 | * added on the elevator at this point.  In addition, we don't have | 
|  | 1338 | * reliable access to the elevator outside queue lock.  Only check basic | 
|  | 1339 | * merging parameters without querying the elevator. | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1340 | */ | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1341 | static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, | 
|  | 1342 | unsigned int *request_count) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1343 | { | 
|  | 1344 | struct blk_plug *plug; | 
|  | 1345 | struct request *rq; | 
|  | 1346 | bool ret = false; | 
|  | 1347 |  | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1348 | plug = current->plug; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1349 | if (!plug) | 
|  | 1350 | goto out; | 
| Shaohua Li | 56ebdaf | 2011-08-24 16:04:34 +0200 | [diff] [blame] | 1351 | *request_count = 0; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1352 |  | 
|  | 1353 | list_for_each_entry_reverse(rq, &plug->list, queuelist) { | 
|  | 1354 | int el_ret; | 
|  | 1355 |  | 
| Shaohua Li | 1b2e19f | 2012-04-06 11:37:47 -0600 | [diff] [blame] | 1356 | if (rq->q == q) | 
|  | 1357 | (*request_count)++; | 
| Shaohua Li | 56ebdaf | 2011-08-24 16:04:34 +0200 | [diff] [blame] | 1358 |  | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1359 | if (rq->q != q || !blk_rq_merge_ok(rq, bio)) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1360 | continue; | 
|  | 1361 |  | 
| Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 1362 | el_ret = blk_try_merge(rq, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1363 | if (el_ret == ELEVATOR_BACK_MERGE) { | 
|  | 1364 | ret = bio_attempt_back_merge(q, rq, bio); | 
|  | 1365 | if (ret) | 
|  | 1366 | break; | 
|  | 1367 | } else if (el_ret == ELEVATOR_FRONT_MERGE) { | 
|  | 1368 | ret = bio_attempt_front_merge(q, rq, bio); | 
|  | 1369 | if (ret) | 
|  | 1370 | break; | 
|  | 1371 | } | 
|  | 1372 | } | 
|  | 1373 | out: | 
|  | 1374 | return ret; | 
|  | 1375 | } | 
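|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch): a submitter batching bios under a | 
|  |  |  * plug so that attempt_plug_merge() can coalesce them lock-free | 
|  |  |  * before they are flushed to the queue. | 
|  |  |  */ | 
|  |  | static void example_submit_batch(struct bio **bios, int nr) | 
|  |  | { | 
|  |  | 	struct blk_plug plug; | 
|  |  | 	int i; | 
|  |  |  | 
|  |  | 	blk_start_plug(&plug); | 
|  |  | 	for (i = 0; i < nr; i++) | 
|  |  | 		submit_bio(WRITE, bios[i]);	/* queued on current->plug */ | 
|  |  | 	blk_finish_plug(&plug);			/* dispatches the batch */ | 
|  |  | } | 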
|  | 1376 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 1377 | void init_request_from_bio(struct request *req, struct bio *bio) | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1378 | { | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 1379 | req->cmd_type = REQ_TYPE_FS; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1380 |  | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 1381 | req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; | 
|  | 1382 | if (bio->bi_rw & REQ_RAHEAD) | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 1383 | req->cmd_flags |= REQ_FAILFAST_MASK; | 
| Jens Axboe | b31dc66 | 2006-06-13 08:26:10 +0200 | [diff] [blame] | 1384 |  | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1385 | req->errors = 0; | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 1386 | req->__sector = bio->bi_sector; | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1387 | req->ioprio = bio_prio(bio); | 
| NeilBrown | bc1c56f | 2007-08-16 13:31:30 +0200 | [diff] [blame] | 1388 | blk_rq_bio_prep(req->q, req, bio); | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1389 | } | 
| Maya Erez | 6018155 | 2012-06-27 11:25:26 +0300 | [diff] [blame] | 1390 | EXPORT_SYMBOL(init_request_from_bio); | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1391 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1392 | void blk_queue_bio(struct request_queue *q, struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | { | 
| Jiri Slaby | 5e00d1b | 2010-08-12 14:31:06 +0200 | [diff] [blame] | 1394 | const bool sync = !!(bio->bi_rw & REQ_SYNC); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1395 | struct blk_plug *plug; | 
|  | 1396 | int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; | 
|  | 1397 | struct request *req; | 
| Shaohua Li | 56ebdaf | 2011-08-24 16:04:34 +0200 | [diff] [blame] | 1398 | unsigned int request_count = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1400 | /* | 
|  | 1401 | * low level driver can indicate that it wants pages above a | 
|  | 1402 | * certain limit bounced to low memory (ie for highmem, or even | 
|  | 1403 | * ISA dma in theory) | 
|  | 1404 | */ | 
|  | 1405 | blk_queue_bounce(q, &bio); | 
|  | 1406 |  | 
| Tejun Heo | 4fed947 | 2010-09-03 11:56:17 +0200 | [diff] [blame] | 1407 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1408 | spin_lock_irq(q->queue_lock); | 
| Tejun Heo | ae1b153 | 2011-01-25 12:43:54 +0100 | [diff] [blame] | 1409 | where = ELEVATOR_INSERT_FLUSH; | 
| Tejun Heo | 28e7d18 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1410 | goto get_rq; | 
|  | 1411 | } | 
|  | 1412 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1413 | /* | 
|  | 1414 | * Check if we can merge with the plugged list before grabbing | 
|  | 1415 | * any locks. | 
|  | 1416 | */ | 
| Tejun Heo | bd87b58 | 2011-10-19 14:33:08 +0200 | [diff] [blame] | 1417 | if (attempt_plug_merge(q, bio, &request_count)) | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1418 | return; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1419 |  | 
|  | 1420 | spin_lock_irq(q->queue_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1421 |  | 
|  | 1422 | el_ret = elv_merge(q, &req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1423 | if (el_ret == ELEVATOR_BACK_MERGE) { | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1424 | if (bio_attempt_back_merge(q, req, bio)) { | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1425 | elv_bio_merged(q, req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1426 | if (!attempt_back_merge(q, req)) | 
|  | 1427 | elv_merged_request(q, req, el_ret); | 
|  | 1428 | goto out_unlock; | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1429 | } | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1430 | } else if (el_ret == ELEVATOR_FRONT_MERGE) { | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1431 | if (bio_attempt_front_merge(q, req, bio)) { | 
| Tejun Heo | 07c2bd3 | 2012-02-08 09:19:42 +0100 | [diff] [blame] | 1432 | elv_bio_merged(q, req, bio); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1433 | if (!attempt_front_merge(q, req)) | 
|  | 1434 | elv_merged_request(q, req, el_ret); | 
|  | 1435 | goto out_unlock; | 
|  | 1436 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | } | 
|  | 1438 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1439 | get_rq: | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1440 | /* | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1441 | * This sync check and mask will be re-done in init_request_from_bio(), | 
|  | 1442 | * but we need to set it earlier to expose the sync flag to the | 
|  | 1443 | * rq allocator and io schedulers. | 
|  | 1444 | */ | 
|  | 1445 | rw_flags = bio_data_dir(bio); | 
|  | 1446 | if (sync) | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 1447 | rw_flags |= REQ_SYNC; | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1448 |  | 
|  | 1449 | /* | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1450 | * Grab a free request. This might sleep but cannot fail. | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1451 | * Returns with the queue unlocked. | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1452 | */ | 
| Jens Axboe | 7749a8d | 2006-12-13 13:02:26 +0100 | [diff] [blame] | 1453 | req = get_request_wait(q, rw_flags, bio); | 
| Tejun Heo | da8303c | 2011-10-19 14:33:05 +0200 | [diff] [blame] | 1454 | if (unlikely(!req)) { | 
|  | 1455 | bio_endio(bio, -ENODEV);	/* @q is dead */ | 
|  | 1456 | goto out_unlock; | 
|  | 1457 | } | 
| Nick Piggin | d634453 | 2005-06-28 20:45:14 -0700 | [diff] [blame] | 1458 |  | 
| Nick Piggin | 450991b | 2005-06-28 20:45:13 -0700 | [diff] [blame] | 1459 | /* | 
|  | 1460 | * After dropping the lock and possibly sleeping here, our request | 
|  | 1461 | * may now be mergeable after it had proven unmergeable (above). | 
|  | 1462 | * We don't worry about that case for efficiency. It won't happen | 
|  | 1463 | * often, and the elevators are able to handle it. | 
|  | 1464 | */ | 
| Tejun Heo | 52d9e67 | 2006-01-06 09:49:58 +0100 | [diff] [blame] | 1465 | init_request_from_bio(req, bio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1466 |  | 
| Tao Ma | 9562ad9 | 2011-10-24 16:11:30 +0200 | [diff] [blame] | 1467 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) | 
| Jens Axboe | 11ccf11 | 2011-07-26 15:01:15 +0200 | [diff] [blame] | 1468 | req->cpu = raw_smp_processor_id(); | 
| Tejun Heo | dd83100 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1469 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1470 | plug = current->plug; | 
| Jens Axboe | 721a960 | 2011-03-09 11:56:30 +0100 | [diff] [blame] | 1471 | if (plug) { | 
| Jens Axboe | dc6d36c | 2011-04-12 10:28:28 +0200 | [diff] [blame] | 1472 | /* | 
|  | 1473 | * If this is the first request added after a plug, fire | 
|  | 1474 | * off a plug trace. If others have been added before, check | 
|  | 1475 | * if we have multiple devices in this plug. If so, make a | 
|  | 1476 | * note to sort the list before dispatch. | 
|  | 1477 | */ | 
|  | 1478 | if (list_empty(&plug->list)) | 
|  | 1479 | trace_block_plug(q); | 
| Shaohua Li | 3540d5e | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1480 | else { | 
|  | 1481 | if (!plug->should_sort) { | 
|  | 1482 | struct request *__rq; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1483 |  | 
| Shaohua Li | 3540d5e | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1484 | __rq = list_entry_rq(plug->list.prev); | 
|  | 1485 | if (__rq->q != q) | 
|  | 1486 | plug->should_sort = 1; | 
|  | 1487 | } | 
| Shaohua Li | 019ceb7 | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1488 | if (request_count >= BLK_MAX_REQUEST_COUNT) { | 
| Shaohua Li | 3540d5e | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1489 | blk_flush_plug_list(plug, false); | 
| Shaohua Li | 019ceb7 | 2011-11-16 09:21:50 +0100 | [diff] [blame] | 1490 | trace_block_plug(q); | 
|  | 1491 | } | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1492 | } | 
| Shaohua Li | a632716 | 2011-08-24 16:04:32 +0200 | [diff] [blame] | 1493 | list_add_tail(&req->queuelist, &plug->list); | 
|  | 1494 | drive_stat_acct(req, 1); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1495 | } else { | 
|  | 1496 | spin_lock_irq(q->queue_lock); | 
|  | 1497 | add_acct_request(q, req, where); | 
| Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 1498 | __blk_run_queue(q); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1499 | out_unlock: | 
|  | 1500 | spin_unlock_irq(q->queue_lock); | 
|  | 1501 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | } | 
| Jens Axboe | c20e8de | 2011-09-12 12:03:37 +0200 | [diff] [blame] | 1503 | EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */ | 
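|  |  |  | 
|  |  | /* | 
|  |  |  * Example (illustrative sketch): a bio-based driver installing its own | 
|  |  |  * make_request_fn, while request-based stacking drivers can point the | 
|  |  |  * hook back at blk_queue_bio (the reason for the export above). | 
|  |  |  * example_make_request() is hypothetical. | 
|  |  |  */ | 
|  |  | static void example_make_request(struct request_queue *q, struct bio *bio); | 
|  |  |  | 
|  |  | static void example_init_queue(struct request_queue *q, bool bio_based) | 
|  |  | { | 
|  |  | 	blk_queue_make_request(q, bio_based ? example_make_request | 
|  |  | 					    : blk_queue_bio); | 
|  |  | } | 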
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 |  | 
|  | 1505 | /* | 
|  | 1506 | * If bio->bi_bdev is a partition, remap the location | 
|  | 1507 | */ | 
|  | 1508 | static inline void blk_partition_remap(struct bio *bio) | 
|  | 1509 | { | 
|  | 1510 | struct block_device *bdev = bio->bi_bdev; | 
|  | 1511 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1512 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | struct hd_struct *p = bdev->bd_part; | 
| Jens Axboe | a362357 | 2005-11-01 09:26:16 +0100 | [diff] [blame] | 1514 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | bio->bi_sector += p->start_sect; | 
|  | 1516 | bio->bi_bdev = bdev->bd_contains; | 
| Alan D. Brunelle | c7149d6 | 2007-08-07 15:30:23 +0200 | [diff] [blame] | 1517 |  | 
| Mike Snitzer | d07335e | 2010-11-16 12:52:38 +0100 | [diff] [blame] | 1518 | trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, | 
|  | 1519 | bdev->bd_dev, | 
|  | 1520 | bio->bi_sector - p->start_sect); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1521 | } | 
|  | 1522 | } | 
|  | 1523 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1524 | static void handle_bad_sector(struct bio *bio) | 
|  | 1525 | { | 
|  | 1526 | char b[BDEVNAME_SIZE]; | 
|  | 1527 |  | 
|  | 1528 | printk(KERN_INFO "attempt to access beyond end of device\n"); | 
|  | 1529 | printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", | 
|  | 1530 | bdevname(bio->bi_bdev, b), | 
|  | 1531 | bio->bi_rw, | 
|  | 1532 | (unsigned long long)bio->bi_sector + bio_sectors(bio), | 
| Mike Snitzer | 77304d2 | 2010-11-08 14:39:12 +0100 | [diff] [blame] | 1533 | (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1534 |  | 
|  | 1535 | set_bit(BIO_EOF, &bio->bi_flags); | 
|  | 1536 | } | 
|  | 1537 |  | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1538 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 
|  | 1539 |  | 
|  | 1540 | static DECLARE_FAULT_ATTR(fail_make_request); | 
|  | 1541 |  | 
|  | 1542 | static int __init setup_fail_make_request(char *str) | 
|  | 1543 | { | 
|  | 1544 | return setup_fault_attr(&fail_make_request, str); | 
|  | 1545 | } | 
|  | 1546 | __setup("fail_make_request=", setup_fail_make_request); | 
|  | 1547 |  | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1548 | static bool should_fail_request(struct hd_struct *part, unsigned int bytes) | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1549 | { | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1550 | return part->make_it_fail && should_fail(&fail_make_request, bytes); | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1551 | } | 
|  | 1552 |  | 
|  | 1553 | static int __init fail_make_request_debugfs(void) | 
|  | 1554 | { | 
| Akinobu Mita | dd48c08 | 2011-08-03 16:21:01 -0700 | [diff] [blame] | 1555 | struct dentry *dir = fault_create_debugfs_attr("fail_make_request", | 
|  | 1556 | NULL, &fail_make_request); | 
|  | 1557 |  | 
|  | 1558 | return IS_ERR(dir) ? PTR_ERR(dir) : 0; | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1559 | } | 
|  | 1560 |  | 
|  | 1561 | late_initcall(fail_make_request_debugfs); | 
|  | 1562 |  | 
|  | 1563 | #else /* CONFIG_FAIL_MAKE_REQUEST */ | 
|  | 1564 |  | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1565 | static inline bool should_fail_request(struct hd_struct *part, | 
|  | 1566 | unsigned int bytes) | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1567 | { | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1568 | return false; | 
| Akinobu Mita | c17bb49 | 2006-12-08 02:39:46 -0800 | [diff] [blame] | 1569 | } | 
|  | 1570 |  | 
|  | 1571 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | 
|  | 1572 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1573 | /* | 
|  | 1574 | * Check whether this bio extends beyond the end of the device. | 
|  | 1575 | */ | 
|  | 1576 | static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | 
|  | 1577 | { | 
|  | 1578 | sector_t maxsector; | 
|  | 1579 |  | 
|  | 1580 | if (!nr_sectors) | 
|  | 1581 | return 0; | 
|  | 1582 |  | 
|  | 1583 | /* Test device or partition size, when known. */ | 
| Mike Snitzer | 77304d2 | 2010-11-08 14:39:12 +0100 | [diff] [blame] | 1584 | maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1585 | if (maxsector) { | 
|  | 1586 | sector_t sector = bio->bi_sector; | 
|  | 1587 |  | 
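|  |  | /* | 
|  |  |  * Checked this way round so that "sector + nr_sectors" can | 
|  |  |  * never overflow sector_t; both comparisons only subtract. | 
|  |  |  */ | 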
|  | 1588 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | 
|  | 1589 | /* | 
|  | 1590 | * This may well happen - the kernel calls bread() | 
|  | 1591 | * without checking the size of the device, e.g., when | 
|  | 1592 | * mounting a device. | 
|  | 1593 | */ | 
|  | 1594 | handle_bad_sector(bio); | 
|  | 1595 | return 1; | 
|  | 1596 | } | 
|  | 1597 | } | 
|  | 1598 |  | 
|  | 1599 | return 0; | 
|  | 1600 | } | 
|  | 1601 |  | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1602 | static noinline_for_stack bool | 
|  | 1603 | generic_make_request_checks(struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | { | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1605 | struct request_queue *q; | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1606 | int nr_sectors = bio_sectors(bio); | 
| Jens Axboe | 51fd77b | 2007-11-02 08:49:08 +0100 | [diff] [blame] | 1607 | int err = -EIO; | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1608 | char b[BDEVNAME_SIZE]; | 
|  | 1609 | struct hd_struct *part; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1610 |  | 
|  | 1611 | might_sleep(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 |  | 
| Jens Axboe | c07e2b4 | 2007-07-18 13:27:58 +0200 | [diff] [blame] | 1613 | if (bio_check_eod(bio, nr_sectors)) | 
|  | 1614 | goto end_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1615 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1616 | q = bdev_get_queue(bio->bi_bdev); | 
|  | 1617 | if (unlikely(!q)) { | 
|  | 1618 | printk(KERN_ERR | 
|  | 1619 | "generic_make_request: Trying to access " | 
|  | 1620 | "nonexistent block-device %s (%Lu)\n", | 
|  | 1621 | bdevname(bio->bi_bdev, b), | 
|  | 1622 | (long long) bio->bi_sector); | 
|  | 1623 | goto end_io; | 
|  | 1624 | } | 
|  | 1625 |  | 
| Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 1626 | if (unlikely(!(bio->bi_rw & (REQ_DISCARD | REQ_SANITIZE)) && | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1627 | nr_sectors > queue_max_hw_sectors(q))) { | 
|  | 1628 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | 
|  | 1629 | bdevname(bio->bi_bdev, b), | 
|  | 1630 | bio_sectors(bio), | 
|  | 1631 | queue_max_hw_sectors(q)); | 
|  | 1632 | goto end_io; | 
|  | 1633 | } | 
|  | 1634 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1635 | part = bio->bi_bdev->bd_part; | 
|  | 1636 | if (should_fail_request(part, bio->bi_size) || | 
|  | 1637 | should_fail_request(&part_to_disk(part)->part0, | 
|  | 1638 | bio->bi_size)) | 
|  | 1639 | goto end_io; | 
|  | 1640 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | /* | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1642 | * If this device has partitions, remap block n | 
|  | 1643 | * of partition p to block n+start(p) of the disk. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | */ | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1645 | blk_partition_remap(bio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1647 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) | 
|  | 1648 | goto end_io; | 
|  | 1649 |  | 
|  | 1650 | if (bio_check_eod(bio, nr_sectors)) | 
|  | 1651 | goto end_io; | 
|  | 1652 |  | 
|  | 1653 | /* | 
|  | 1654 | * Filter flush bios early so that make_request based | 
|  | 1655 | * drivers without flush support don't have to worry | 
|  | 1656 | * about them. | 
|  | 1657 | */ | 
|  | 1658 | if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { | 
|  | 1659 | bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); | 
|  | 1660 | if (!nr_sectors) { | 
|  | 1661 | err = 0; | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1662 | goto end_io; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | } | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1664 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1665 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1666 | if ((bio->bi_rw & REQ_DISCARD) && | 
|  | 1667 | (!blk_queue_discard(q) || | 
|  | 1668 | ((bio->bi_rw & REQ_SECURE) && | 
|  | 1669 | !blk_queue_secdiscard(q)))) { | 
|  | 1670 | err = -EOPNOTSUPP; | 
|  | 1671 | goto end_io; | 
|  | 1672 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 |  | 
| Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 1674 | if ((bio->bi_rw & REQ_SANITIZE) && | 
|  | 1675 | (!blk_queue_sanitize(q))) { | 
|  | 1676 | pr_info("%s - got a SANITIZE request but the queue " | 
|  | 1677 | "doesn't support sanitize requests", __func__); | 
|  | 1678 | err = -EOPNOTSUPP; | 
|  | 1679 | goto end_io; | 
|  | 1680 | } | 
|  | 1681 |  | 
| Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1682 | if (blk_throtl_bio(q, bio)) | 
|  | 1683 | return false;	/* throttled, will be resubmitted later */ | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1684 |  | 
| Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1685 | trace_block_bio_queue(q, bio); | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1686 | return true; | 
| Tejun Heo | a738467 | 2008-11-28 13:32:03 +0900 | [diff] [blame] | 1687 |  | 
|  | 1688 | end_io: | 
|  | 1689 | bio_endio(bio, err); | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1690 | return false; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1691 | } | 
|  | 1692 |  | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1693 | /** | 
|  | 1694 | * generic_make_request - hand a buffer to its device driver for I/O | 
|  | 1695 | * @bio:  The bio describing the location in memory and on the device. | 
|  | 1696 | * | 
|  | 1697 | * generic_make_request() is used to make I/O requests of block | 
|  | 1698 | * devices. It is passed a &struct bio, which describes the I/O that needs | 
|  | 1699 | * to be done. | 
|  | 1700 | * | 
|  | 1701 | * generic_make_request() does not return any status.  The | 
|  | 1702 | * success/failure status of the request, along with notification of | 
|  | 1703 | * completion, is delivered asynchronously through the bio->bi_end_io | 
|  | 1704 | * function described (one day) elsewhere. | 
|  | 1705 | * | 
|  | 1706 | * The caller of generic_make_request must make sure that bi_io_vec | 
|  | 1707 | * are set to describe the memory buffer, and that bi_bdev and bi_sector are | 
|  | 1708 | * set to describe the device address, and the | 
|  | 1709 | * bi_end_io and optionally bi_private are set to describe how | 
|  | 1710 | * completion notification should be signaled. | 
|  | 1711 | * | 
|  | 1712 | * generic_make_request and the drivers it calls may use bi_next if this | 
|  | 1713 | * bio happens to be merged with someone else, and may resubmit the bio to | 
|  | 1714 | * a lower device by calling into generic_make_request recursively, which | 
|  | 1715 | * means the bio should NOT be touched after the call to ->make_request_fn. | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1716 | */ | 
|  | 1717 | void generic_make_request(struct bio *bio) | 
|  | 1718 | { | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1719 | struct bio_list bio_list_on_stack; | 
|  | 1720 |  | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1721 | if (!generic_make_request_checks(bio)) | 
|  | 1722 | return; | 
|  | 1723 |  | 
|  | 1724 | /* | 
|  | 1725 | * We only want one ->make_request_fn to be active at a time, else | 
|  | 1726 | * stack usage with stacked devices could be a problem.  So use | 
|  | 1727 | * current->bio_list to keep a list of requests submitted by a | 
|  | 1728 | * make_request_fn function.  current->bio_list is also used as a | 
|  | 1729 | * flag to say if generic_make_request is currently active in this | 
|  | 1730 | * task or not.  If it is NULL, then no make_request is active.  If | 
|  | 1731 | * it is non-NULL, then a make_request is active, and new requests | 
|  | 1732 | * should be added at the tail. | 
|  | 1733 | */ | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1734 | if (current->bio_list) { | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1735 | bio_list_add(current->bio_list, bio); | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1736 | return; | 
|  | 1737 | } | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1738 |  | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1739 | /* The following loop may be a bit non-obvious, and so deserves some | 
|  | 1740 | * explanation. | 
|  | 1741 | * Before entering the loop, bio->bi_next is NULL (as all callers | 
|  | 1742 | * ensure that) so we have a list with a single bio. | 
|  | 1743 | * We pretend that we have just taken it off a longer list, so | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1744 | * we assign bio_list to a pointer to the bio_list_on_stack, | 
|  | 1745 | * thus initialising the bio_list of new bios to be | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1746 | * added.  ->make_request() may indeed add some more bios | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1747 | * through a recursive call to generic_make_request.  If it | 
|  | 1748 | * did, we find a non-NULL value in bio_list and re-enter the loop | 
|  | 1749 | * from the top.  In this case we really did just take the bio | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1750 | * off the top of the list (no pretending) and so remove it from | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1751 | * bio_list, and call into ->make_request() again. | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1752 | */ | 
|  | 1753 | BUG_ON(bio->bi_next); | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1754 | bio_list_init(&bio_list_on_stack); | 
|  | 1755 | current->bio_list = &bio_list_on_stack; | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1756 | do { | 
| Christoph Hellwig | 27a84d5 | 2011-09-15 14:01:40 +0200 | [diff] [blame] | 1757 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 
|  | 1758 |  | 
|  | 1759 | q->make_request_fn(q, bio); | 
|  | 1760 |  | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1761 | bio = bio_list_pop(current->bio_list); | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1762 | } while (bio); | 
| Akinobu Mita | bddd87c | 2010-02-23 08:55:42 +0100 | [diff] [blame] | 1763 | current->bio_list = NULL; /* deactivate */ | 
| Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1764 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1765 | EXPORT_SYMBOL(generic_make_request); | 
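|  |  |  | 
|  |  | /* | 
|  |  |  * Example usage (a minimal sketch, not part of the original file; the | 
|  |  |  * names my_bio_done and my_read_page are illustrative and error | 
|  |  |  * handling is omitted): read one page synchronously by filling in a | 
|  |  |  * bio as described above and handing it to generic_make_request(). | 
|  |  |  */ | 
|  |  | static void my_bio_done(struct bio *bio, int error) | 
|  |  | { | 
|  |  | 	complete(bio->bi_private);	/* wake up the submitter */ | 
|  |  | 	bio_put(bio); | 
|  |  | } | 
|  |  |  | 
|  |  | static int my_read_page(struct block_device *bdev, sector_t sector, | 
|  |  | 			struct page *page) | 
|  |  | { | 
|  |  | 	DECLARE_COMPLETION_ONSTACK(done); | 
|  |  | 	struct bio *bio = bio_alloc(GFP_NOIO, 1); | 
|  |  |  | 
|  |  | 	bio->bi_bdev = bdev;			/* device address */ | 
|  |  | 	bio->bi_sector = sector;		/* starting sector */ | 
|  |  | 	bio_add_page(bio, page, PAGE_SIZE, 0);	/* memory buffer */ | 
|  |  | 	bio->bi_end_io = my_bio_done;		/* completion callback */ | 
|  |  | 	bio->bi_private = &done; | 
|  |  |  | 
|  |  | 	generic_make_request(bio);	/* bi_rw of 0 means READ */ | 
|  |  | 	wait_for_completion(&done); | 
|  |  | 	return 0; | 
|  |  | } | 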
|  | 1766 |  | 
|  | 1767 | /** | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1768 | * submit_bio - submit a bio to the block device layer for I/O | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1769 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | 
|  | 1770 | * @bio: The &struct bio which describes the I/O | 
|  | 1771 | * | 
|  | 1772 | * submit_bio() is very similar in purpose to generic_make_request(), and | 
|  | 1773 | * uses that function to do most of the work. Both are fairly rough | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 1774 | * interfaces; @bio must be set up and ready for I/O. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1775 | * | 
|  | 1776 | */ | 
|  | 1777 | void submit_bio(int rw, struct bio *bio) | 
|  | 1778 | { | 
|  | 1779 | int count = bio_sectors(bio); | 
|  | 1780 |  | 
| Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 1781 | bio->bi_rw |= rw; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 |  | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1783 | /* | 
|  | 1784 | * If it's a regular read/write or a barrier with data attached, | 
|  | 1785 | * go through the normal accounting stuff before submission. | 
|  | 1786 | */ | 
| Maya Erez | 73937f5 | 2012-05-24 23:33:05 +0300 | [diff] [blame] | 1787 | if (bio_has_data(bio) && | 
|  | 1788 | (!(rw & (REQ_DISCARD | REQ_SANITIZE)))) { | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1789 | if (rw & WRITE) { | 
|  | 1790 | count_vm_events(PGPGOUT, count); | 
|  | 1791 | } else { | 
|  | 1792 | task_io_account_read(bio->bi_size); | 
|  | 1793 | count_vm_events(PGPGIN, count); | 
|  | 1794 | } | 
|  | 1795 |  | 
|  | 1796 | if (unlikely(block_dump)) { | 
|  | 1797 | char b[BDEVNAME_SIZE]; | 
| San Mehat | 8dcbdc7 | 2010-09-14 08:48:01 +0200 | [diff] [blame] | 1798 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", | 
| Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 1799 | current->comm, task_pid_nr(current), | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1800 | (rw & WRITE) ? "WRITE" : "READ", | 
|  | 1801 | (unsigned long long)bio->bi_sector, | 
| San Mehat | 8dcbdc7 | 2010-09-14 08:48:01 +0200 | [diff] [blame] | 1802 | bdevname(bio->bi_bdev, b), | 
|  | 1803 | count); | 
| Jens Axboe | bf2de6f | 2007-09-27 13:01:25 +0200 | [diff] [blame] | 1804 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1805 | } | 
|  | 1806 |  | 
|  | 1807 | generic_make_request(bio); | 
|  | 1808 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1809 | EXPORT_SYMBOL(submit_bio); | 
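|  |  |  | 
|  |  | /* | 
|  |  |  * Example call (sketch): @rw is OR'd into bio->bi_rw above, so | 
|  |  |  * modifier flags can be combined with the direction, e.g. for a | 
|  |  |  * synchronous write of an already prepared bio: | 
|  |  |  */ | 
|  |  | static void my_submit_sync_write(struct bio *bio) | 
|  |  | { | 
|  |  | 	submit_bio(WRITE | REQ_SYNC, bio); | 
|  |  | } | 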
|  | 1810 |  | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1811 | /** | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1812 | * blk_rq_check_limits - Helper function to check a request against the queue limits | 
|  | 1813 | * @q:  the queue | 
|  | 1814 | * @rq: the request being checked | 
|  | 1815 | * | 
|  | 1816 | * Description: | 
|  | 1817 | *    @rq may have been made based on weaker limitations of upper-level queues | 
|  | 1818 | *    in request stacking drivers, and it may violate the limitation of @q. | 
|  | 1819 | *    Since the block layer and the underlying device driver trust @rq | 
|  | 1820 | *    after it is inserted to @q, it should be checked against @q before | 
|  | 1821 | *    the insertion using this generic function. | 
|  | 1822 | * | 
|  | 1823 | *    This function should also be useful for request stacking drivers | 
| Stefan Weil | eef35c2 | 2010-08-06 21:11:15 +0200 | [diff] [blame] | 1824 | *    in some cases below, so export this function. | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1825 | *    Request stacking drivers like request-based dm may change the queue | 
|  | 1826 | *    limits while requests are in the queue (e.g. dm's table swapping). | 
|  | 1827 | * Such request stacking drivers should check those requests against | 
|  | 1828 | * the new queue limits again when they dispatch those requests, | 
|  | 1829 | * although such checks are also done against the old queue limits | 
|  | 1830 | *    when submitting requests. | 
|  | 1831 | */ | 
|  | 1832 | int blk_rq_check_limits(struct request_queue *q, struct request *rq) | 
|  | 1833 | { | 
| Maya Erez | 73937f5 | 2012-05-24 23:33:05 +0300 | [diff] [blame] | 1834 | if (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE)) | 
| Mike Snitzer | 3383977 | 2010-08-08 12:11:33 -0400 | [diff] [blame] | 1835 | return 0; | 
|  | 1836 |  | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1837 | if (blk_rq_sectors(rq) > queue_max_sectors(q) || | 
|  | 1838 | blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1839 | printk(KERN_ERR "%s: over max size limit.\n", __func__); | 
|  | 1840 | return -EIO; | 
|  | 1841 | } | 
|  | 1842 |  | 
|  | 1843 | /* | 
|  | 1844 | * queue's settings related to segment counting like q->bounce_pfn | 
|  | 1845 | * may differ from those of other stacking queues. | 
|  | 1846 | * Recalculate the segment count to check the request correctly | 
|  | 1847 | * against this queue's limits. | 
|  | 1848 | */ | 
|  | 1849 | blk_recalc_rq_segments(rq); | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1850 | if (rq->nr_phys_segments > queue_max_segments(q)) { | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1851 | printk(KERN_ERR "%s: over max segments limit.\n", __func__); | 
|  | 1852 | return -EIO; | 
|  | 1853 | } | 
|  | 1854 |  | 
|  | 1855 | return 0; | 
|  | 1856 | } | 
|  | 1857 | EXPORT_SYMBOL_GPL(blk_rq_check_limits); | 
|  | 1858 |  | 
|  | 1859 | /** | 
|  | 1860 | * blk_insert_cloned_request - Helper for stacking drivers to submit a request | 
|  | 1861 | * @q:  the queue to submit the request | 
|  | 1862 | * @rq: the request being queued | 
|  | 1863 | */ | 
|  | 1864 | int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | 
|  | 1865 | { | 
|  | 1866 | unsigned long flags; | 
| Jeff Moyer | 4853aba | 2011-08-15 21:37:25 +0200 | [diff] [blame] | 1867 | int where = ELEVATOR_INSERT_BACK; | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1868 |  | 
|  | 1869 | if (blk_rq_check_limits(q, rq)) | 
|  | 1870 | return -EIO; | 
|  | 1871 |  | 
| Akinobu Mita | b2c9cd3 | 2011-07-26 16:09:03 -0700 | [diff] [blame] | 1872 | if (rq->rq_disk && | 
|  | 1873 | should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1874 | return -EIO; | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1875 |  | 
|  | 1876 | spin_lock_irqsave(q->queue_lock, flags); | 
| Tejun Heo | 8ba6143 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 1877 | if (unlikely(blk_queue_dead(q))) { | 
|  | 1878 | spin_unlock_irqrestore(q->queue_lock, flags); | 
|  | 1879 | return -ENODEV; | 
|  | 1880 | } | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1881 |  | 
|  | 1882 | /* | 
|  | 1883 | * The request being submitted must be dequeued before calling this | 
|  | 1884 | * function because it will be linked to another request_queue. | 
|  | 1885 | */ | 
|  | 1886 | BUG_ON(blk_queued_rq(rq)); | 
|  | 1887 |  | 
| Jeff Moyer | 4853aba | 2011-08-15 21:37:25 +0200 | [diff] [blame] | 1888 | if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) | 
|  | 1889 | where = ELEVATOR_INSERT_FLUSH; | 
|  | 1890 |  | 
|  | 1891 | add_acct_request(q, rq, where); | 
| Jeff Moyer | e67b77c | 2011-10-17 12:57:23 +0200 | [diff] [blame] | 1892 | if (where == ELEVATOR_INSERT_FLUSH) | 
|  | 1893 | __blk_run_queue(q); | 
| Kiyoshi Ueda | 82124d6 | 2008-09-18 10:45:38 -0400 | [diff] [blame] | 1894 | spin_unlock_irqrestore(q->queue_lock, flags); | 
|  | 1895 |  | 
|  | 1896 | return 0; | 
|  | 1897 | } | 
|  | 1898 | EXPORT_SYMBOL_GPL(blk_insert_cloned_request); | 
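|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch of the intended calling pattern for a request stacking driver | 
|  |  |  * (my_dispatch_clone is an illustrative name).  The clone is re-checked | 
|  |  |  * against the lower queue's limits by blk_rq_check_limits() inside | 
|  |  |  * blk_insert_cloned_request(), so the caller only handles the errors. | 
|  |  |  */ | 
|  |  | static int my_dispatch_clone(struct request_queue *lower_q, | 
|  |  | 			     struct request *clone) | 
|  |  | { | 
|  |  | 	int ret = blk_insert_cloned_request(lower_q, clone); | 
|  |  |  | 
|  |  | 	/* | 
|  |  | 	 * -EIO: the clone violates lower_q's limits; -ENODEV: lower_q | 
|  |  | 	 * is dead.  A real driver would fail or requeue the original | 
|  |  | 	 * request here. | 
|  |  | 	 */ | 
|  |  | 	return ret; | 
|  |  | } | 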
|  | 1899 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 1900 | /** | 
|  | 1901 | * blk_rq_err_bytes - determine number of bytes till the next failure boundary | 
|  | 1902 | * @rq: request to examine | 
|  | 1903 | * | 
|  | 1904 | * Description: | 
|  | 1905 | *     A request could be a merge of bios which require different failure | 
|  | 1906 | *     handling.  This function determines the number of bytes which | 
|  | 1907 | *     can be failed from the beginning of the request without | 
|  | 1908 | *     crossing into an area which needs to be retried further. | 
|  | 1909 | * | 
|  | 1910 | * Return: | 
|  | 1911 | *     The number of bytes to fail. | 
|  | 1912 | * | 
|  | 1913 | * Context: | 
|  | 1914 | *     queue_lock must be held. | 
|  | 1915 | */ | 
|  | 1916 | unsigned int blk_rq_err_bytes(const struct request *rq) | 
|  | 1917 | { | 
|  | 1918 | unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; | 
|  | 1919 | unsigned int bytes = 0; | 
|  | 1920 | struct bio *bio; | 
|  | 1921 |  | 
|  | 1922 | if (!(rq->cmd_flags & REQ_MIXED_MERGE)) | 
|  | 1923 | return blk_rq_bytes(rq); | 
|  | 1924 |  | 
|  | 1925 | /* | 
|  | 1926 | * Currently the only 'mixing' which can happen is between | 
|  | 1927 | * different failfast types.  We can safely fail portions | 
|  | 1928 | * which have all the failfast bits that the first one has - | 
|  | 1929 | * the ones which are at least as eager to fail as the first | 
|  | 1930 | * one. | 
|  | 1931 | */ | 
|  | 1932 | for (bio = rq->bio; bio; bio = bio->bi_next) { | 
|  | 1933 | if ((bio->bi_rw & ff) != ff) | 
|  | 1934 | break; | 
|  | 1935 | bytes += bio->bi_size; | 
|  | 1936 | } | 
|  | 1937 |  | 
|  | 1938 | /* this could lead to infinite loop */ | 
|  | 1939 | BUG_ON(blk_rq_bytes(rq) && !bytes); | 
|  | 1940 | return bytes; | 
|  | 1941 | } | 
|  | 1942 | EXPORT_SYMBOL_GPL(blk_rq_err_bytes); | 
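|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch (illustrative; queue_lock assumed held by the caller): fail | 
|  |  |  * only the bytes that share the first bio's failfast attributes, then | 
|  |  |  * requeue whatever remains so it can be retried. | 
|  |  |  */ | 
|  |  | static void my_fail_safe_prefix(struct request_queue *q, struct request *rq) | 
|  |  | { | 
|  |  | 	if (__blk_end_request(rq, -EIO, blk_rq_err_bytes(rq))) | 
|  |  | 		blk_requeue_request(q, rq);	/* retry the remainder */ | 
|  |  | } | 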
|  | 1943 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1944 | static void blk_account_io_completion(struct request *req, unsigned int bytes) | 
|  | 1945 | { | 
| Jens Axboe | c2553b5 | 2009-04-24 08:10:11 +0200 | [diff] [blame] | 1946 | if (blk_do_io_stat(req)) { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1947 | const int rw = rq_data_dir(req); | 
|  | 1948 | struct hd_struct *part; | 
|  | 1949 | int cpu; | 
|  | 1950 |  | 
|  | 1951 | cpu = part_stat_lock(); | 
| Jerome Marchand | 09e099d | 2011-01-05 16:57:38 +0100 | [diff] [blame] | 1952 | part = req->part; | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1953 | part_stat_add(cpu, part, sectors[rw], bytes >> 9); | 
|  | 1954 | part_stat_unlock(); | 
|  | 1955 | } | 
|  | 1956 | } | 
|  | 1957 |  | 
|  | 1958 | static void blk_account_io_done(struct request *req) | 
|  | 1959 | { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1960 | /* | 
| Tejun Heo | dd4c133 | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 1961 | * Account IO completion.  flush_rq isn't accounted as a | 
|  | 1962 | * normal IO either on queueing or completion.  Accounting the | 
|  | 1963 | * containing request is enough. | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1964 | */ | 
| Tejun Heo | 414b4ff | 2011-01-25 12:43:49 +0100 | [diff] [blame] | 1965 | if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1966 | unsigned long duration = jiffies - req->start_time; | 
|  | 1967 | const int rw = rq_data_dir(req); | 
|  | 1968 | struct hd_struct *part; | 
|  | 1969 | int cpu; | 
|  | 1970 |  | 
|  | 1971 | cpu = part_stat_lock(); | 
| Jerome Marchand | 09e099d | 2011-01-05 16:57:38 +0100 | [diff] [blame] | 1972 | part = req->part; | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1973 |  | 
|  | 1974 | part_stat_inc(cpu, part, ios[rw]); | 
|  | 1975 | part_stat_add(cpu, part, ticks[rw], duration); | 
|  | 1976 | part_round_stats(cpu, part); | 
| Nikanth Karthikesan | 316d315 | 2009-10-06 20:16:55 +0200 | [diff] [blame] | 1977 | part_dec_in_flight(part, rw); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1978 |  | 
| Jens Axboe | 6c23a96 | 2011-01-07 08:43:37 +0100 | [diff] [blame] | 1979 | hd_struct_put(part); | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 1980 | part_stat_unlock(); | 
|  | 1981 | } | 
|  | 1982 | } | 
|  | 1983 |  | 
| Tejun Heo | 53a0880 | 2008-12-03 12:41:26 +0100 | [diff] [blame] | 1984 | /** | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1985 | * blk_peek_request - peek at the top of a request queue | 
|  | 1986 | * @q: request queue to peek at | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1987 | * | 
|  | 1988 | * Description: | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1989 | *     Return the request at the top of @q.  The returned request | 
|  | 1990 | *     should be started using blk_start_request() before LLD starts | 
|  | 1991 | *     processing it. | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 1992 | * | 
|  | 1993 | * Return: | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 1994 | *     Pointer to the request at the top of @q if available.  Null | 
|  | 1995 | *     otherwise. | 
|  | 1996 | * | 
|  | 1997 | * Context: | 
|  | 1998 | *     queue_lock must be held. | 
|  | 1999 | */ | 
|  | 2000 | struct request *blk_peek_request(struct request_queue *q) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2001 | { | 
|  | 2002 | struct request *rq; | 
|  | 2003 | int ret; | 
|  | 2004 |  | 
|  | 2005 | while ((rq = __elv_next_request(q)) != NULL) { | 
|  | 2006 | if (!(rq->cmd_flags & REQ_STARTED)) { | 
|  | 2007 | /* | 
|  | 2008 | * This is the first time the device driver | 
|  | 2009 | * sees this request (possibly after | 
|  | 2010 | * requeueing).  Notify IO scheduler. | 
|  | 2011 | */ | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2012 | if (rq->cmd_flags & REQ_SORTED) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2013 | elv_activate_rq(q, rq); | 
|  | 2014 |  | 
|  | 2015 | /* | 
|  | 2016 | * just mark it as started even if we don't start | 
|  | 2017 | * it; a request that has been delayed should | 
|  | 2018 | * not be passed by new incoming requests | 
|  | 2019 | */ | 
|  | 2020 | rq->cmd_flags |= REQ_STARTED; | 
| Tatyana Brokhman | 2dd5b26 | 2013-05-01 14:35:20 +0300 | [diff] [blame] | 2021 | if (rq->cmd_flags & REQ_URGENT) { | 
|  | 2022 | WARN_ON(q->dispatched_urgent); | 
|  | 2023 | q->dispatched_urgent = true; | 
|  | 2024 | } | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2025 | trace_block_rq_issue(q, rq); | 
|  | 2026 | } | 
|  | 2027 |  | 
|  | 2028 | if (!q->boundary_rq || q->boundary_rq == rq) { | 
|  | 2029 | q->end_sector = rq_end_sector(rq); | 
|  | 2030 | q->boundary_rq = NULL; | 
|  | 2031 | } | 
|  | 2032 |  | 
|  | 2033 | if (rq->cmd_flags & REQ_DONTPREP) | 
|  | 2034 | break; | 
|  | 2035 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2036 | if (q->dma_drain_size && blk_rq_bytes(rq)) { | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2037 | /* | 
|  | 2038 | * make sure space for the drain appears.  We | 
|  | 2039 | * know we can do this because max_hw_segments | 
|  | 2040 | * has been adjusted to be one fewer than the | 
|  | 2041 | * device can handle | 
|  | 2042 | */ | 
|  | 2043 | rq->nr_phys_segments++; | 
|  | 2044 | } | 
|  | 2045 |  | 
|  | 2046 | if (!q->prep_rq_fn) | 
|  | 2047 | break; | 
|  | 2048 |  | 
|  | 2049 | ret = q->prep_rq_fn(q, rq); | 
|  | 2050 | if (ret == BLKPREP_OK) { | 
|  | 2051 | break; | 
|  | 2052 | } else if (ret == BLKPREP_DEFER) { | 
|  | 2053 | /* | 
|  | 2054 | * the request may have been (partially) prepped. | 
|  | 2055 | * we need to keep this request in the front to | 
|  | 2056 | * avoid resource deadlock.  REQ_STARTED will | 
|  | 2057 | * prevent other fs requests from passing this one. | 
|  | 2058 | */ | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2059 | if (q->dma_drain_size && blk_rq_bytes(rq) && | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2060 | !(rq->cmd_flags & REQ_DONTPREP)) { | 
|  | 2061 | /* | 
|  | 2062 | * remove the space for the drain we added | 
|  | 2063 | * so that we don't add it again | 
|  | 2064 | */ | 
|  | 2065 | --rq->nr_phys_segments; | 
|  | 2066 | } | 
|  | 2067 |  | 
|  | 2068 | rq = NULL; | 
|  | 2069 | break; | 
|  | 2070 | } else if (ret == BLKPREP_KILL) { | 
|  | 2071 | rq->cmd_flags |= REQ_QUIET; | 
| James Bottomley | c143dc9 | 2009-05-30 06:43:49 +0200 | [diff] [blame] | 2072 | /* | 
|  | 2073 | * Mark this request as started so we don't trigger | 
|  | 2074 | * any debug logic in the end I/O path. | 
|  | 2075 | */ | 
|  | 2076 | blk_start_request(rq); | 
| Tejun Heo | 40cbbb7 | 2009-04-23 11:05:19 +0900 | [diff] [blame] | 2077 | __blk_end_request_all(rq, -EIO); | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2078 | } else { | 
|  | 2079 | printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); | 
|  | 2080 | break; | 
|  | 2081 | } | 
|  | 2082 | } | 
|  | 2083 |  | 
|  | 2084 | return rq; | 
|  | 2085 | } | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2086 | EXPORT_SYMBOL(blk_peek_request); | 
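|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch (the my_* names are illustrative): a driver with a limited | 
|  |  |  * number of hardware slots peeks first and only starts a request once | 
|  |  |  * it knows it can take it; a peeked-but-not-started request simply | 
|  |  |  * stays at the head of the queue for the next run. | 
|  |  |  */ | 
|  |  | static bool my_hw_slot_available(void);		/* hypothetical */ | 
|  |  | static void my_hw_queue(struct request *rq);	/* hypothetical */ | 
|  |  |  | 
|  |  | static void my_tagged_request_fn(struct request_queue *q) | 
|  |  | { | 
|  |  | 	struct request *rq; | 
|  |  |  | 
|  |  | 	while ((rq = blk_peek_request(q)) != NULL) { | 
|  |  | 		if (!my_hw_slot_available()) | 
|  |  | 			break;			/* leave rq for later */ | 
|  |  | 		blk_start_request(rq);		/* dequeue + arm timeout */ | 
|  |  | 		my_hw_queue(rq); | 
|  |  | 	} | 
|  |  | } | 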
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2087 |  | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2088 | void blk_dequeue_request(struct request *rq) | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2089 | { | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2090 | struct request_queue *q = rq->q; | 
|  | 2091 |  | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2092 | BUG_ON(list_empty(&rq->queuelist)); | 
|  | 2093 | BUG_ON(ELV_ON_HASH(rq)); | 
|  | 2094 |  | 
|  | 2095 | list_del_init(&rq->queuelist); | 
|  | 2096 |  | 
|  | 2097 | /* | 
|  | 2098 | * the time frame between a request being removed from the lists | 
|  | 2099 | * and it being freed is accounted as IO that is in progress at | 
|  | 2100 | * the driver side. | 
|  | 2101 | */ | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 2102 | if (blk_account_rq(rq)) { | 
| Jens Axboe | 0a7ae2f | 2009-05-20 08:54:31 +0200 | [diff] [blame] | 2103 | q->in_flight[rq_is_sync(rq)]++; | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 2104 | set_io_start_time_ns(rq); | 
|  | 2105 | } | 
| Tejun Heo | 158dbda | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2106 | } | 
|  | 2107 |  | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2108 | /** | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2109 | * blk_start_request - start request processing on the driver | 
|  | 2110 | * @req: request to dequeue | 
|  | 2111 | * | 
|  | 2112 | * Description: | 
|  | 2113 | *     Dequeue @req and start timeout timer on it.  This hands off the | 
|  | 2114 | *     request to the driver. | 
|  | 2115 | * | 
|  | 2116 | *     Block internal functions which don't want to start timer should | 
|  | 2117 | *     call blk_dequeue_request(). | 
|  | 2118 | * | 
|  | 2119 | * Context: | 
|  | 2120 | *     queue_lock must be held. | 
|  | 2121 | */ | 
|  | 2122 | void blk_start_request(struct request *req) | 
|  | 2123 | { | 
|  | 2124 | blk_dequeue_request(req); | 
|  | 2125 |  | 
|  | 2126 | /* | 
| Tejun Heo | 5f49f63 | 2009-05-19 18:33:05 +0900 | [diff] [blame] | 2127 | * We are now handing the request to the hardware; initialize | 
|  | 2128 | * resid_len to full count and add the timeout handler. | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2129 | */ | 
| Tejun Heo | 5f49f63 | 2009-05-19 18:33:05 +0900 | [diff] [blame] | 2130 | req->resid_len = blk_rq_bytes(req); | 
| FUJITA Tomonori | dbb66c4 | 2009-06-09 05:47:10 +0200 | [diff] [blame] | 2131 | if (unlikely(blk_bidi_rq(req))) | 
|  | 2132 | req->next_rq->resid_len = blk_rq_bytes(req->next_rq); | 
|  | 2133 |  | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2134 | blk_add_timer(req); | 
|  | 2135 | } | 
|  | 2136 | EXPORT_SYMBOL(blk_start_request); | 
|  | 2137 |  | 
|  | 2138 | /** | 
|  | 2139 | * blk_fetch_request - fetch a request from a request queue | 
|  | 2140 | * @q: request queue to fetch a request from | 
|  | 2141 | * | 
|  | 2142 | * Description: | 
|  | 2143 | *     Return the request at the top of @q.  The request is started on | 
|  | 2144 | *     return and the LLD can start processing it immediately. | 
|  | 2145 | * | 
|  | 2146 | * Return: | 
|  | 2147 | *     Pointer to the request at the top of @q if available.  Null | 
|  | 2148 | *     otherwise. | 
|  | 2149 | * | 
|  | 2150 | * Context: | 
|  | 2151 | *     queue_lock must be held. | 
|  | 2152 | */ | 
|  | 2153 | struct request *blk_fetch_request(struct request_queue *q) | 
|  | 2154 | { | 
|  | 2155 | struct request *rq; | 
|  | 2156 |  | 
|  | 2157 | rq = blk_peek_request(q); | 
| Tatyana Brokhman | 2dd5b26 | 2013-05-01 14:35:20 +0300 | [diff] [blame] | 2158 | if (rq) | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 2159 | blk_start_request(rq); | 
|  | 2160 | return rq; | 
|  | 2161 | } | 
|  | 2162 | EXPORT_SYMBOL(blk_fetch_request); | 
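|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch: the common request_fn shape for a simple driver that | 
|  |  |  * processes each request to completion on the spot (my_do_request is | 
|  |  |  * an illustrative name).  request_fn runs with queue_lock held, hence | 
|  |  |  * the __blk_* completion variant. | 
|  |  |  */ | 
|  |  | static int my_do_request(struct request *rq);	/* hypothetical */ | 
|  |  |  | 
|  |  | static void my_simple_request_fn(struct request_queue *q) | 
|  |  | { | 
|  |  | 	struct request *rq; | 
|  |  |  | 
|  |  | 	while ((rq = blk_fetch_request(q)) != NULL) | 
|  |  | 		__blk_end_request_all(rq, my_do_request(rq)); | 
|  |  | } | 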
|  | 2163 |  | 
|  | 2164 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2165 | * blk_update_request - Special helper function for request stacking drivers | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2166 | * @req:      the request being processed | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2167 | * @error:    %0 for success, < %0 for error | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2168 | * @nr_bytes: number of bytes to complete @req | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2169 | * | 
|  | 2170 | * Description: | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2171 | *     Ends I/O on a number of bytes attached to @req, but doesn't complete | 
|  | 2172 | *     the request structure even if @req doesn't have leftover. | 
|  | 2173 | *     If @req has leftover, sets it up for the next range of segments. | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2174 | * | 
|  | 2175 | *     This special helper function is only for request stacking drivers | 
|  | 2176 | *     (e.g. request-based dm) so that they can handle partial completion. | 
|  | 2177 | *     Actual device drivers should use blk_end_request instead. | 
|  | 2178 | * | 
|  | 2179 | *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees | 
|  | 2180 | *     %false return from this function. | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2181 | * | 
|  | 2182 | * Return: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2183 | *     %false - this request doesn't have any more data | 
|  | 2184 | *     %true  - this request has more data | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 2185 | **/ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2186 | bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2187 | { | 
| Kiyoshi Ueda | 5450d3e | 2007-12-11 17:53:03 -0500 | [diff] [blame] | 2188 | int total_bytes, bio_nbytes, next_idx = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2189 | struct bio *bio; | 
|  | 2190 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2191 | if (!req->bio) | 
|  | 2192 | return false; | 
|  | 2193 |  | 
| Arnaldo Carvalho de Melo | 5f3ea37 | 2008-10-30 08:34:33 +0100 | [diff] [blame] | 2194 | trace_block_rq_complete(req->q, req); | 
| Jens Axboe | 2056a78 | 2006-03-23 20:00:26 +0100 | [diff] [blame] | 2195 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2196 | /* | 
| Tejun Heo | 6f41469 | 2009-04-19 07:00:41 +0900 | [diff] [blame] | 2197 | * For fs requests, rq is just a carrier of independent bios | 
|  | 2198 | * and each partial completion should be handled separately. | 
|  | 2199 | * Reset per-request error on each partial completion. | 
|  | 2200 | * | 
|  | 2201 | * TODO: tj: This is too subtle.  It would be better to let | 
|  | 2202 | * low level drivers do what they see fit. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2203 | */ | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2204 | if (req->cmd_type == REQ_TYPE_FS) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2205 | req->errors = 0; | 
|  | 2206 |  | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2207 | if (error && req->cmd_type == REQ_TYPE_FS && | 
|  | 2208 | !(req->cmd_flags & REQ_QUIET)) { | 
| Hannes Reinecke | 7977556 | 2011-01-18 10:13:13 +0100 | [diff] [blame] | 2209 | char *error_type; | 
|  | 2210 |  | 
|  | 2211 | switch (error) { | 
|  | 2212 | case -ENOLINK: | 
|  | 2213 | error_type = "recoverable transport"; | 
|  | 2214 | break; | 
|  | 2215 | case -EREMOTEIO: | 
|  | 2216 | error_type = "critical target"; | 
|  | 2217 | break; | 
|  | 2218 | case -EBADE: | 
|  | 2219 | error_type = "critical nexus"; | 
|  | 2220 | break; | 
|  | 2221 | case -EIO: | 
|  | 2222 | default: | 
|  | 2223 | error_type = "I/O"; | 
|  | 2224 | break; | 
|  | 2225 | } | 
| Asutosh Das | 75de0c3 | 2013-03-07 17:43:35 +0530 | [diff] [blame] | 2226 | printk_ratelimited( | 
|  | 2227 | KERN_ERR "end_request: %s error, dev %s, sector %llu\n", | 
|  | 2228 | error_type, | 
|  | 2229 | req->rq_disk ? req->rq_disk->disk_name : "?", | 
|  | 2230 | (unsigned long long)blk_rq_pos(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2231 | } | 
|  | 2232 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2233 | blk_account_io_completion(req, nr_bytes); | 
| Jens Axboe | d72d904 | 2005-11-01 08:35:42 +0100 | [diff] [blame] | 2234 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2235 | total_bytes = bio_nbytes = 0; | 
|  | 2236 | while ((bio = req->bio) != NULL) { | 
|  | 2237 | int nbytes; | 
|  | 2238 |  | 
|  | 2239 | if (nr_bytes >= bio->bi_size) { | 
|  | 2240 | req->bio = bio->bi_next; | 
|  | 2241 | nbytes = bio->bi_size; | 
| NeilBrown | 5bb23a6 | 2007-09-27 12:46:13 +0200 | [diff] [blame] | 2242 | req_bio_endio(req, bio, nbytes, error); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2243 | next_idx = 0; | 
|  | 2244 | bio_nbytes = 0; | 
|  | 2245 | } else { | 
|  | 2246 | int idx = bio->bi_idx + next_idx; | 
|  | 2247 |  | 
| Kazuhisa Ichikawa | af498d7 | 2009-05-12 13:27:45 +0200 | [diff] [blame] | 2248 | if (unlikely(idx >= bio->bi_vcnt)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2249 | blk_dump_rq_flags(req, "__end_that"); | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 2250 | printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", | 
| Kazuhisa Ichikawa | af498d7 | 2009-05-12 13:27:45 +0200 | [diff] [blame] | 2251 | __func__, idx, bio->bi_vcnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2252 | break; | 
|  | 2253 | } | 
|  | 2254 |  | 
|  | 2255 | nbytes = bio_iovec_idx(bio, idx)->bv_len; | 
|  | 2256 | BIO_BUG_ON(nbytes > bio->bi_size); | 
|  | 2257 |  | 
|  | 2258 | /* | 
|  | 2259 | * not a complete bvec done | 
|  | 2260 | */ | 
|  | 2261 | if (unlikely(nbytes > nr_bytes)) { | 
|  | 2262 | bio_nbytes += nr_bytes; | 
|  | 2263 | total_bytes += nr_bytes; | 
|  | 2264 | break; | 
|  | 2265 | } | 
|  | 2266 |  | 
|  | 2267 | /* | 
|  | 2268 | * advance to the next vector | 
|  | 2269 | */ | 
|  | 2270 | next_idx++; | 
|  | 2271 | bio_nbytes += nbytes; | 
|  | 2272 | } | 
|  | 2273 |  | 
|  | 2274 | total_bytes += nbytes; | 
|  | 2275 | nr_bytes -= nbytes; | 
|  | 2276 |  | 
| Jens Axboe | 6728cb0 | 2008-01-31 13:03:55 +0100 | [diff] [blame] | 2277 | bio = req->bio; | 
|  | 2278 | if (bio) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2279 | /* | 
|  | 2280 | * end more in this run, or just return 'not-done' | 
|  | 2281 | */ | 
|  | 2282 | if (unlikely(nr_bytes <= 0)) | 
|  | 2283 | break; | 
|  | 2284 | } | 
|  | 2285 | } | 
|  | 2286 |  | 
|  | 2287 | /* | 
|  | 2288 | * completely done | 
|  | 2289 | */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2290 | if (!req->bio) { | 
|  | 2291 | /* | 
|  | 2292 | * Reset counters so that the request stacking driver | 
|  | 2293 | * can find how many bytes remain in the request | 
|  | 2294 | * later. | 
|  | 2295 | */ | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2296 | req->__data_len = 0; | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2297 | return false; | 
|  | 2298 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2299 |  | 
|  | 2300 | /* | 
|  | 2301 | * if the request wasn't completed, update state | 
|  | 2302 | */ | 
|  | 2303 | if (bio_nbytes) { | 
| NeilBrown | 5bb23a6 | 2007-09-27 12:46:13 +0200 | [diff] [blame] | 2304 | req_bio_endio(req, bio, bio_nbytes, error); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2305 | bio->bi_idx += next_idx; | 
|  | 2306 | bio_iovec(bio)->bv_offset += nr_bytes; | 
|  | 2307 | bio_iovec(bio)->bv_len -= nr_bytes; | 
|  | 2308 | } | 
|  | 2309 |  | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2310 | req->__data_len -= total_bytes; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2311 | req->buffer = bio_data(req->bio); | 
|  | 2312 |  | 
|  | 2313 | /* update sector only for requests with a clear definition of sector */ | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2314 | if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2315 | req->__sector += total_bytes >> 9; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2316 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2317 | /* mixed attributes always follow the first bio */ | 
|  | 2318 | if (req->cmd_flags & REQ_MIXED_MERGE) { | 
|  | 2319 | req->cmd_flags &= ~REQ_FAILFAST_MASK; | 
|  | 2320 | req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; | 
|  | 2321 | } | 
|  | 2322 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2323 | /* | 
|  | 2324 | * If total number of sectors is less than the first segment | 
|  | 2325 | * size, something has gone terribly wrong. | 
|  | 2326 | */ | 
|  | 2327 | if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { | 
| Jens Axboe | 8182924 | 2011-03-30 09:51:33 +0200 | [diff] [blame] | 2328 | blk_dump_rq_flags(req, "request botched"); | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2329 | req->__data_len = blk_rq_cur_bytes(req); | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2330 | } | 
|  | 2331 |  | 
|  | 2332 | /* recalculate the number of segments */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2333 | blk_recalc_rq_segments(req); | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 2334 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2335 | return true; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | } | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2337 | EXPORT_SYMBOL_GPL(blk_update_request); | 
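|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch: a request stacking driver completing whatever the lower | 
|  |  |  * device has finished so far.  If blk_update_request() returns true | 
|  |  |  * the request is set up for its remaining segments and stays around; | 
|  |  |  * otherwise it is finished through the blk_end_* helpers. | 
|  |  |  */ | 
|  |  | static void my_stacked_complete(struct request *orig, int error, | 
|  |  | 				unsigned int done_bytes) | 
|  |  | { | 
|  |  | 	if (blk_update_request(orig, error, done_bytes)) | 
|  |  | 		return;		/* leftover; wait for more completions */ | 
|  |  |  | 
|  |  | 	blk_end_request_all(orig, error);	/* all data transferred */ | 
|  |  | } | 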
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2338 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2339 | static bool blk_update_bidi_request(struct request *rq, int error, | 
|  | 2340 | unsigned int nr_bytes, | 
|  | 2341 | unsigned int bidi_bytes) | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2342 | { | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2343 | if (blk_update_request(rq, error, nr_bytes)) | 
|  | 2344 | return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2345 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2346 | /* Bidi request must be completed as a whole */ | 
|  | 2347 | if (unlikely(blk_bidi_rq(rq)) && | 
|  | 2348 | blk_update_request(rq->next_rq, error, bidi_bytes)) | 
|  | 2349 | return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2350 |  | 
| Jens Axboe | e2e1a14 | 2010-06-09 10:42:09 +0200 | [diff] [blame] | 2351 | if (blk_queue_add_random(rq->q)) | 
|  | 2352 | add_disk_randomness(rq->rq_disk); | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2353 |  | 
|  | 2354 | return false; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2355 | } | 
|  | 2356 |  | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 2357 | /** | 
|  | 2358 | * blk_unprep_request - unprepare a request | 
|  | 2359 | * @req:	the request | 
|  | 2360 | * | 
|  | 2361 | * This function makes a request ready for complete resubmission (or | 
|  | 2362 | * completion).  It happens only after all error handling is complete, | 
|  | 2363 | * so represents the appropriate moment to deallocate any resources | 
|  | 2364 | * that were allocated to the request in the prep_rq_fn.  The queue | 
|  | 2365 | * lock is held when calling this. | 
|  | 2366 | */ | 
|  | 2367 | void blk_unprep_request(struct request *req) | 
|  | 2368 | { | 
|  | 2369 | struct request_queue *q = req->q; | 
|  | 2370 |  | 
|  | 2371 | req->cmd_flags &= ~REQ_DONTPREP; | 
|  | 2372 | if (q->unprep_rq_fn) | 
|  | 2373 | q->unprep_rq_fn(q, req); | 
|  | 2374 | } | 
|  | 2375 | EXPORT_SYMBOL_GPL(blk_unprep_request); | 
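|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch of a prep/unprep pair managing a per-request resource (the | 
|  |  |  * my_cmd_* helpers are illustrative); the pair would be registered | 
|  |  |  * with blk_queue_prep_rq() and blk_queue_unprep_rq(). | 
|  |  |  */ | 
|  |  | static void *my_cmd_alloc(void);	/* hypothetical */ | 
|  |  | static void my_cmd_free(void *cmd);	/* hypothetical */ | 
|  |  |  | 
|  |  | static int my_prep_rq(struct request_queue *q, struct request *rq) | 
|  |  | { | 
|  |  | 	rq->special = my_cmd_alloc(); | 
|  |  | 	if (!rq->special) | 
|  |  | 		return BLKPREP_DEFER;	/* retry; rq stays at the head */ | 
|  |  | 	rq->cmd_flags |= REQ_DONTPREP;	/* don't prep again on requeue */ | 
|  |  | 	return BLKPREP_OK; | 
|  |  | } | 
|  |  |  | 
|  |  | static void my_unprep_rq(struct request_queue *q, struct request *rq) | 
|  |  | { | 
|  |  | 	my_cmd_free(rq->special); | 
|  |  | 	rq->special = NULL; | 
|  |  | } | 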
|  | 2376 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2377 | /* | 
|  | 2378 | * queue lock must be held | 
|  | 2379 | */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2380 | static void blk_finish_request(struct request *req, int error) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2381 | { | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2382 | if (blk_rq_tagged(req)) | 
|  | 2383 | blk_queue_end_tag(req->q, req); | 
|  | 2384 |  | 
| James Bottomley | ba396a6 | 2009-05-27 14:17:08 +0200 | [diff] [blame] | 2385 | BUG_ON(blk_queued_rq(req)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2386 |  | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 2387 | if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) | 
| Matthew Garrett | 31373d0 | 2010-04-06 14:25:14 +0200 | [diff] [blame] | 2388 | laptop_io_completion(&req->q->backing_dev_info); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 |  | 
| Mike Anderson | e78042e | 2008-10-30 02:16:20 -0700 | [diff] [blame] | 2390 | blk_delete_timer(req); | 
|  | 2391 |  | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 2392 | if (req->cmd_flags & REQ_DONTPREP) | 
|  | 2393 | blk_unprep_request(req); | 
|  | 2394 |  | 
|  | 2395 |  | 
| Jens Axboe | bc58ba9 | 2009-01-23 10:54:44 +0100 | [diff] [blame] | 2396 | blk_account_io_done(req); | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2397 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2398 | if (req->end_io) | 
| Tejun Heo | 8ffdc65 | 2006-01-06 09:49:03 +0100 | [diff] [blame] | 2399 | req->end_io(req, error); | 
| Kiyoshi Ueda | b828623 | 2007-12-11 17:53:24 -0500 | [diff] [blame] | 2400 | else { | 
|  | 2401 | if (blk_bidi_rq(req)) | 
|  | 2402 | __blk_put_request(req->next_rq->q, req->next_rq); | 
|  | 2403 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2404 | __blk_put_request(req->q, req); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2405 | } | 
|  | 2406 | } | 
|  | 2407 |  | 
| Kiyoshi Ueda | 3b11313 | 2007-12-11 17:41:17 -0500 | [diff] [blame] | 2408 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2409 | * blk_end_bidi_request - Complete a bidi request | 
|  | 2410 | * @rq:         the request to complete | 
| Randy Dunlap | 710027a | 2008-08-19 20:13:11 +0200 | [diff] [blame] | 2411 | * @error:      %0 for success, < %0 for error | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2412 | * @nr_bytes:   number of bytes to complete @rq | 
|  | 2413 | * @bidi_bytes: number of bytes to complete @rq->next_rq | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2414 | * | 
|  | 2415 | * Description: | 
|  | 2416 | *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq. | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2417 | *     Drivers that support bidi can safely call this function for any | 
|  | 2418 | *     type of request, bidi or uni.  In the latter case @bidi_bytes is | 
|  | 2419 | *     just ignored. | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2420 | * | 
|  | 2421 | * Return: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2422 | *     %false - we are done with this request | 
|  | 2423 | *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2424 | **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2425 | static bool blk_end_bidi_request(struct request *rq, int error, | 
|  | 2426 | unsigned int nr_bytes, unsigned int bidi_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2427 | { | 
|  | 2428 | struct request_queue *q = rq->q; | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2429 | unsigned long flags; | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2430 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2431 | if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 
|  | 2432 | return true; | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2433 |  | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2434 | spin_lock_irqsave(q->queue_lock, flags); | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2435 | blk_finish_request(rq, error); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2436 | spin_unlock_irqrestore(q->queue_lock, flags); | 
|  | 2437 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2438 | return false; | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2439 | } | 
| Kiyoshi Ueda | e3a04fe | 2007-12-11 17:51:46 -0500 | [diff] [blame] | 2440 |  | 
|  | 2441 | /** | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2442 | * __blk_end_bidi_request - Complete a bidi request with queue lock held | 
|  | 2443 | * @rq:         the request to complete | 
|  | 2444 | * @error:      %0 for success, < %0 for error | 
|  | 2445 | * @nr_bytes:   number of bytes to complete @rq | 
|  | 2446 | * @bidi_bytes: number of bytes to complete @rq->next_rq | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2447 | * | 
|  | 2448 | * Description: | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2449 | *     Identical to blk_end_bidi_request() except that queue lock is | 
|  | 2450 | *     assumed to be locked on entry and remains so on return. | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2451 | * | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2452 | * Return: | 
|  | 2453 | *     %false - we are done with this request | 
|  | 2454 | *     %true  - still buffers pending for this request | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2455 | **/ | 
| Jeff Moyer | 4853aba | 2011-08-15 21:37:25 +0200 | [diff] [blame] | 2456 | bool __blk_end_bidi_request(struct request *rq, int error, | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2457 | unsigned int nr_bytes, unsigned int bidi_bytes) | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2458 | { | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2459 | if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) | 
|  | 2460 | return true; | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2461 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2462 | blk_finish_request(rq, error); | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2463 |  | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 2464 | return false; | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2465 | } | 
|  | 2466 |  | 
|  | 2467 | /** | 
|  | 2468 | * blk_end_request - Helper function for drivers to complete the request. | 
|  | 2469 | * @rq:       the request being processed | 
|  | 2470 | * @error:    %0 for success, < %0 for error | 
|  | 2471 | * @nr_bytes: number of bytes to complete | 
|  | 2472 | * | 
|  | 2473 | * Description: | 
|  | 2474 | *     Ends I/O on a number of bytes attached to @rq. | 
|  | 2475 | *     If @rq has leftover, sets it up for the next range of segments. | 
|  | 2476 | * | 
|  | 2477 | * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2478 | *     %false - we are done with this request | 
|  | 2479 | *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2480 | **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2481 | bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2482 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2483 | return blk_end_bidi_request(rq, error, nr_bytes, 0); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2484 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2485 | EXPORT_SYMBOL(blk_end_request); | 
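|  |  |  | 
|  |  | /* | 
|  |  |  * Sketch: completing a chunk of I/O, typically from a driver's | 
|  |  |  * interrupt handler.  The per-device structure and my_bytes_done() | 
|  |  |  * are illustrative.  blk_end_request() takes the queue lock itself, | 
|  |  |  * so the caller must not hold it. | 
|  |  |  */ | 
|  |  | struct my_dev {				/* hypothetical */ | 
|  |  | 	struct request *current_rq; | 
|  |  | }; | 
|  |  | static unsigned int my_bytes_done(struct my_dev *md);	/* hypothetical */ | 
|  |  |  | 
|  |  | static void my_complete_chunk(struct my_dev *md) | 
|  |  | { | 
|  |  | 	if (!blk_end_request(md->current_rq, 0, my_bytes_done(md))) | 
|  |  | 		md->current_rq = NULL;	/* fully completed */ | 
|  |  | } | 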
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2486 |  | 
|  | 2487 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2488 | * blk_end_request_all - Helper function for drivers to finish the request. | 
|  | 2489 | * @rq: the request to finish | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2490 | * @error: %0 for success, < %0 for error | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2491 | * | 
|  | 2492 | * Description: | 
|  | 2493 | *     Completely finish @rq. | 
|  | 2494 | */ | 
|  | 2495 | void blk_end_request_all(struct request *rq, int error) | 
|  | 2496 | { | 
|  | 2497 | bool pending; | 
|  | 2498 | unsigned int bidi_bytes = 0; | 
|  | 2499 |  | 
|  | 2500 | if (unlikely(blk_bidi_rq(rq))) | 
|  | 2501 | bidi_bytes = blk_rq_bytes(rq->next_rq); | 
|  | 2502 |  | 
|  | 2503 | pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 
|  | 2504 | BUG_ON(pending); | 
|  | 2505 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2506 | EXPORT_SYMBOL(blk_end_request_all); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2507 |  | 
|  | 2508 | /** | 
|  | 2509 | * blk_end_request_cur - Helper function to finish the current request chunk. | 
|  | 2510 | * @rq: the request to finish the current chunk for | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2511 | * @error: %0 for success, < %0 for error | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2512 | * | 
|  | 2513 | * Description: | 
|  | 2514 | *     Complete the current consecutively mapped chunk from @rq. | 
|  | 2515 | * | 
|  | 2516 | * Return: | 
|  | 2517 | *     %false - we are done with this request | 
|  | 2518 | *     %true  - still buffers pending for this request | 
|  | 2519 | */ | 
|  | 2520 | bool blk_end_request_cur(struct request *rq, int error) | 
|  | 2521 | { | 
|  | 2522 | return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 
|  | 2523 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2524 | EXPORT_SYMBOL(blk_end_request_cur); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2525 |  | 
|  | 2526 | /** | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2527 | * blk_end_request_err - Finish a request till the next failure boundary. | 
|  | 2528 | * @rq: the request to finish till the next failure boundary for | 
|  | 2529 | * @error: must be negative errno | 
|  | 2530 | * | 
|  | 2531 | * Description: | 
|  | 2532 | *     Complete @rq till the next failure boundary. | 
|  | 2533 | * | 
|  | 2534 | * Return: | 
|  | 2535 | *     %false - we are done with this request | 
|  | 2536 | *     %true  - still buffers pending for this request | 
|  | 2537 | */ | 
|  | 2538 | bool blk_end_request_err(struct request *rq, int error) | 
|  | 2539 | { | 
|  | 2540 | WARN_ON(error >= 0); | 
|  | 2541 | return blk_end_request(rq, error, blk_rq_err_bytes(rq)); | 
|  | 2542 | } | 
|  | 2543 | EXPORT_SYMBOL_GPL(blk_end_request_err); | 
|  | 2544 |  | 
|  | 2545 | /** | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2546 | * __blk_end_request - Helper function for drivers to complete the request. | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2547 | * @rq:       the request being processed | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2548 | * @error:    %0 for success, < %0 for error | 
|  | 2549 | * @nr_bytes: number of bytes to complete | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2550 | * | 
|  | 2551 | * Description: | 
|  | 2552 | *     Must be called with queue lock held unlike blk_end_request(). | 
|  | 2553 | * | 
|  | 2554 | * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2555 | *     %false - we are done with this request | 
|  | 2556 | *     %true  - still buffers pending for this request | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2557 | **/ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2558 | bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2559 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2560 | return __blk_end_bidi_request(rq, error, nr_bytes, 0); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2561 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2562 | EXPORT_SYMBOL(__blk_end_request); | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2563 |  | 
|  | 2564 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2565 | * __blk_end_request_all - Helper function for drivers to finish the request. | 
|  | 2566 | * @rq: the request to finish | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2567 | * @error: %0 for success, < %0 for error | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 2568 | * | 
|  | 2569 | * Description: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2570 | *     Completely finish @rq.  Must be called with queue lock held. | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2571 | */ | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2572 | void __blk_end_request_all(struct request *rq, int error) | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2573 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2574 | bool pending; | 
|  | 2575 | unsigned int bidi_bytes = 0; | 
|  | 2576 |  | 
|  | 2577 | if (unlikely(blk_bidi_rq(rq))) | 
|  | 2578 | bidi_bytes = blk_rq_bytes(rq->next_rq); | 
|  | 2579 |  | 
|  | 2580 | pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); | 
|  | 2581 | BUG_ON(pending); | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2582 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2583 | EXPORT_SYMBOL(__blk_end_request_all); | 
| Kiyoshi Ueda | 32fab44 | 2008-09-18 10:45:09 -0400 | [diff] [blame] | 2584 |  | 
|  | 2585 | /** | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2586 | * __blk_end_request_cur - Helper function to finish the current request chunk. | 
|  | 2587 | * @rq: the request to finish the current chunk for | 
| Randy Dunlap | 8ebf975 | 2009-06-11 20:00:41 -0700 | [diff] [blame] | 2588 | * @error: %0 for success, < %0 for error | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2589 | * | 
|  | 2590 | * Description: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2591 | *     Complete the current consecutively mapped chunk from @rq.  Must | 
|  | 2592 | *     be called with queue lock held. | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2593 | * | 
|  | 2594 | * Return: | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2595 | *     %false - we are done with this request | 
|  | 2596 | *     %true  - still buffers pending for this request | 
|  | 2597 | */ | 
|  | 2598 | bool __blk_end_request_cur(struct request *rq, int error) | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2599 | { | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 2600 | return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2601 | } | 
| Jens Axboe | 56ad174 | 2009-07-28 22:11:24 +0200 | [diff] [blame] | 2602 | EXPORT_SYMBOL(__blk_end_request_cur); | 
| Kiyoshi Ueda | e19a3ab | 2007-12-11 17:51:02 -0500 | [diff] [blame] | 2603 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 2604 | /** | 
|  | 2605 | * __blk_end_request_err - Finish a request till the next failure boundary. | 
|  | 2606 | * @rq: the request to finish till the next failure boundary for | 
|  | 2607 | * @error: must be negative errno | 
|  | 2608 | * | 
|  | 2609 | * Description: | 
|  | 2610 | *     Complete @rq till the next failure boundary.  Must be called | 
|  | 2611 | *     with queue lock held. | 
|  | 2612 | * | 
|  | 2613 | * Return: | 
|  | 2614 | *     %false - we are done with this request | 
|  | 2615 | *     %true  - still buffers pending for this request | 
|  | 2616 | */ | 
|  | 2617 | bool __blk_end_request_err(struct request *rq, int error) | 
|  | 2618 | { | 
|  | 2619 | WARN_ON(error >= 0); | 
|  | 2620 | return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); | 
|  | 2621 | } | 
|  | 2622 | EXPORT_SYMBOL_GPL(__blk_end_request_err); | 
|  | 2623 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 2624 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 
|  | 2625 | struct bio *bio) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2626 | { | 
| Tejun Heo | a82afdf | 2009-07-03 17:48:16 +0900 | [diff] [blame] | 2627 | /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 2628 | rq->cmd_flags |= bio->bi_rw & REQ_WRITE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2629 |  | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 2630 | if (bio_has_data(bio)) { | 
|  | 2631 | rq->nr_phys_segments = bio_phys_segments(q, bio); | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 2632 | rq->buffer = bio_data(bio); | 
|  | 2633 | } | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 2634 | rq->__data_len = bio->bi_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2635 | rq->bio = rq->biotail = bio; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2636 |  | 
| NeilBrown | 6684657 | 2007-08-16 13:31:28 +0200 | [diff] [blame] | 2637 | if (bio->bi_bdev) | 
|  | 2638 | rq->rq_disk = bio->bi_bdev->bd_disk; | 
|  | 2639 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2640 |  | 
| Ilya Loginov | 2d4dc89 | 2009-11-26 09:16:19 +0100 | [diff] [blame] | 2641 | #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE | 
|  | 2642 | /** | 
|  | 2643 | * rq_flush_dcache_pages - Helper function to flush all pages in a request | 
|  | 2644 | * @rq: the request to be flushed | 
|  | 2645 | * | 
|  | 2646 | * Description: | 
|  | 2647 | *     Flush all pages in @rq. | 
|  | 2648 | */ | 
|  | 2649 | void rq_flush_dcache_pages(struct request *rq) | 
|  | 2650 | { | 
|  | 2651 | struct req_iterator iter; | 
|  | 2652 | struct bio_vec *bvec; | 
|  | 2653 |  | 
|  | 2654 | rq_for_each_segment(bvec, rq, iter) | 
|  | 2655 | flush_dcache_page(bvec->bv_page); | 
|  | 2656 | } | 
|  | 2657 | EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); | 
|  | 2658 | #endif | 
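/*
 * Minimal hypothetical sketch: on an ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 * architecture, a driver that filled the request's pages through the
 * kernel mapping flushes them before signalling completion.
 */
static void mydrv_read_done(struct request *rq, int error)
{
	rq_flush_dcache_pages(rq);	/* flush every page the request maps */
	blk_end_request_all(rq, error);
}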
|  | 2659 |  | 
| Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 2660 | /** | 
|  | 2661 | * blk_lld_busy - Check if underlying low-level drivers of a device are busy | 
|  | 2662 | * @q : the queue of the device being checked | 
|  | 2663 | * | 
|  | 2664 | * Description: | 
|  | 2665 | *    Check if underlying low-level drivers of a device are busy. | 
|  | 2666 | *    If the drivers want to export their busy state, they must first | 
|  | 2667 | *    set their own exporting function using blk_queue_lld_busy(). | 
|  | 2668 | * | 
|  | 2669 | *    This function is used only by request stacking drivers to stop | 
|  | 2670 | *    dispatching requests to underlying devices when those devices are | 
|  | 2671 | *    busy.  This behavior improves I/O merging on the queue of the | 
|  | 2672 | *    request stacking driver and prevents I/O throughput regression | 
|  | 2673 | *    under bursty I/O load. | 
|  | 2674 | * | 
|  | 2675 | * Return: | 
|  | 2676 | *    0 - Not busy (The request stacking driver should dispatch request) | 
|  | 2677 | *    1 - Busy (The request stacking driver should stop dispatching request) | 
|  | 2678 | */ | 
|  | 2679 | int blk_lld_busy(struct request_queue *q) | 
|  | 2680 | { | 
|  | 2681 | if (q->lld_busy_fn) | 
|  | 2682 | return q->lld_busy_fn(q); | 
|  | 2683 |  | 
|  | 2684 | return 0; | 
|  | 2685 | } | 
|  | 2686 | EXPORT_SYMBOL_GPL(blk_lld_busy); | 
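/*
 * Illustrative sketch (all names hypothetical): a request stacking
 * driver, e.g. a multipath target, registers its busy callback with
 * blk_queue_lld_busy() and forwards the check to the lower device.
 */
struct mypath_dev {
	struct block_device *lower_bdev;	/* currently selected path */
};

static int mypath_lld_busy(struct request_queue *q)
{
	struct mypath_dev *dev = q->queuedata;

	return blk_lld_busy(bdev_get_queue(dev->lower_bdev));
}

static void mypath_setup_queue(struct request_queue *q)
{
	blk_queue_lld_busy(q, mypath_lld_busy);
}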
|  | 2687 |  | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2688 | /** | 
|  | 2689 | * blk_rq_unprep_clone - Helper function to free all bios in a cloned request | 
|  | 2690 | * @rq: the clone request to be cleaned up | 
|  | 2691 | * | 
|  | 2692 | * Description: | 
|  | 2693 | *     Free all bios in @rq for a cloned request. | 
|  | 2694 | */ | 
|  | 2695 | void blk_rq_unprep_clone(struct request *rq) | 
|  | 2696 | { | 
|  | 2697 | struct bio *bio; | 
|  | 2698 |  | 
|  | 2699 | while ((bio = rq->bio) != NULL) { | 
|  | 2700 | rq->bio = bio->bi_next; | 
|  | 2701 |  | 
|  | 2702 | bio_put(bio); | 
|  | 2703 | } | 
|  | 2704 | } | 
|  | 2705 | EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); | 
|  | 2706 |  | 
|  | 2707 | /* | 
|  | 2708 | * Copy attributes of the original request to the clone request. | 
|  | 2709 | * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. | 
|  | 2710 | */ | 
|  | 2711 | static void __blk_rq_prep_clone(struct request *dst, struct request *src) | 
|  | 2712 | { | 
|  | 2713 | dst->cpu = src->cpu; | 
| Tejun Heo | 3a2edd0 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 2714 | dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE; | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2715 | dst->cmd_type = src->cmd_type; | 
|  | 2716 | dst->__sector = blk_rq_pos(src); | 
|  | 2717 | dst->__data_len = blk_rq_bytes(src); | 
|  | 2718 | dst->nr_phys_segments = src->nr_phys_segments; | 
|  | 2719 | dst->ioprio = src->ioprio; | 
|  | 2720 | dst->extra_len = src->extra_len; | 
|  | 2721 | } | 
|  | 2722 |  | 
|  | 2723 | /** | 
|  | 2724 | * blk_rq_prep_clone - Helper function to setup clone request | 
|  | 2725 | * @rq: the request to be setup | 
|  | 2726 | * @rq_src: original request to be cloned | 
|  | 2727 | * @bs: bio_set that bios for clone are allocated from | 
|  | 2728 | * @gfp_mask: memory allocation mask for bio | 
|  | 2729 | * @bio_ctr: setup function to be called for each clone bio. | 
|  | 2730 | *           Returns %0 for success, non-%0 for failure. | 
|  | 2731 | * @data: private data to be passed to @bio_ctr | 
|  | 2732 | * | 
|  | 2733 | * Description: | 
|  | 2734 | *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. | 
|  | 2735 | *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) | 
|  | 2736 | *     are not copied, and copying such parts is the caller's responsibility. | 
|  | 2737 | *     Also, the pages which the original bios point to are not copied; | 
|  | 2738 | *     the cloned bios simply point to the same pages.  Cloned bios must | 
|  | 2739 | *     therefore be completed before the original bios, which means the | 
|  | 2740 | *     caller must complete @rq before @rq_src. | 
|  | 2741 | */ | 
|  | 2742 | int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 
|  | 2743 | struct bio_set *bs, gfp_t gfp_mask, | 
|  | 2744 | int (*bio_ctr)(struct bio *, struct bio *, void *), | 
|  | 2745 | void *data) | 
|  | 2746 | { | 
|  | 2747 | struct bio *bio, *bio_src; | 
|  | 2748 |  | 
|  | 2749 | if (!bs) | 
|  | 2750 | bs = fs_bio_set; | 
|  | 2751 |  | 
|  | 2752 | blk_rq_init(NULL, rq); | 
|  | 2753 |  | 
|  | 2754 | __rq_for_each_bio(bio_src, rq_src) { | 
|  | 2755 | bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); | 
|  | 2756 | if (!bio) | 
|  | 2757 | goto free_and_out; | 
|  | 2758 |  | 
|  | 2759 | __bio_clone(bio, bio_src); | 
|  | 2760 |  | 
|  | 2761 | if (bio_integrity(bio_src) && | 
| Martin K. Petersen | 7878cba | 2009-06-26 15:37:49 +0200 | [diff] [blame] | 2762 | bio_integrity_clone(bio, bio_src, gfp_mask, bs)) | 
| Kiyoshi Ueda | b0fd271 | 2009-06-11 13:10:16 +0200 | [diff] [blame] | 2763 | goto free_and_out; | 
|  | 2764 |  | 
|  | 2765 | if (bio_ctr && bio_ctr(bio, bio_src, data)) | 
|  | 2766 | goto free_and_out; | 
|  | 2767 |  | 
|  | 2768 | if (rq->bio) { | 
|  | 2769 | rq->biotail->bi_next = bio; | 
|  | 2770 | rq->biotail = bio; | 
|  | 2771 | } else | 
|  | 2772 | rq->bio = rq->biotail = bio; | 
|  | 2773 | } | 
|  | 2774 |  | 
|  | 2775 | __blk_rq_prep_clone(rq, rq_src); | 
|  | 2776 |  | 
|  | 2777 | return 0; | 
|  | 2778 |  | 
|  | 2779 | free_and_out: | 
|  | 2780 | if (bio) | 
|  | 2781 | bio_free(bio, bs); | 
|  | 2782 | blk_rq_unprep_clone(rq); | 
|  | 2783 |  | 
|  | 2784 | return -ENOMEM; | 
|  | 2785 | } | 
|  | 2786 | EXPORT_SYMBOL_GPL(blk_rq_prep_clone); | 
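/*
 * Hedged sketch of the cloning sequence this helper supports, modelled
 * loosely on what a dm-style stacking driver does; every name below is
 * a placeholder, not an existing API.
 */
static int myclone_bio_ctr(struct bio *bio, struct bio *bio_src, void *data)
{
	/* per-bio setup hook for the clone; nothing needed in this sketch */
	return 0;
}

static int myclone_setup(struct request *clone, struct request *rq_src)
{
	int ret;

	/* a NULL bio_set makes the clone bios come from fs_bio_set */
	ret = blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC,
				myclone_bio_ctr, NULL);
	if (ret)
		return ret;

	/*
	 * The clone shares pages with @rq_src, so @clone must complete
	 * first; on a later error, undo with blk_rq_unprep_clone().
	 */
	return 0;
}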
|  | 2787 |  | 
| Jens Axboe | 18887ad | 2008-07-28 13:08:45 +0200 | [diff] [blame] | 2788 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2789 | { | 
|  | 2790 | return queue_work(kblockd_workqueue, work); | 
|  | 2791 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2792 | EXPORT_SYMBOL(kblockd_schedule_work); | 
|  | 2793 |  | 
| Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 2794 | int kblockd_schedule_delayed_work(struct request_queue *q, | 
|  | 2795 | struct delayed_work *dwork, unsigned long delay) | 
|  | 2796 | { | 
|  | 2797 | return queue_delayed_work(kblockd_workqueue, dwork, delay); | 
|  | 2798 | } | 
|  | 2799 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | 
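/*
 * Minimal sketch, assuming a hypothetical driver that wants its queue
 * run from kblockd context rather than on the caller's stack; the
 * delayed_work is INIT_DELAYED_WORK()'d at probe time.
 */
struct mydefer_dev {
	struct request_queue *queue;
	struct delayed_work work;
};

static void mydefer_work_fn(struct work_struct *work)
{
	struct mydefer_dev *dev = container_of(to_delayed_work(work),
					       struct mydefer_dev, work);

	blk_run_queue(dev->queue);	/* takes the queue lock itself */
}

static void mydefer_kick(struct mydefer_dev *dev)
{
	/* run the queue from kblockd after a one-jiffy delay */
	kblockd_schedule_delayed_work(dev->queue, &dev->work, 1);
}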
|  | 2800 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2801 | #define PLUG_MAGIC	0x91827364 | 
|  | 2802 |  | 
| Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 2803 | /** | 
|  | 2804 | * blk_start_plug - initialize blk_plug and track it inside the task_struct | 
|  | 2805 | * @plug:	The &struct blk_plug that needs to be initialized | 
|  | 2806 | * | 
|  | 2807 | * Description: | 
|  | 2808 | *   Tracking blk_plug inside the task_struct will help with auto-flushing the | 
|  | 2809 | *   pending I/O should the task end up blocking between blk_start_plug() and | 
|  | 2810 | *   blk_finish_plug(). This is important from a performance perspective, but | 
|  | 2811 | *   also ensures that we don't deadlock. For instance, if the task blocks | 
|  | 2812 | *   on a memory allocation, memory reclaim could end up wanting to free a | 
|  | 2813 | *   page that belongs to a request currently sitting in our private plug. | 
|  | 2814 | *   Flushing the pending I/O when the process goes to sleep avoids this | 
|  | 2815 | *   kind of deadlock. | 
|  | 2816 | */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2817 | void blk_start_plug(struct blk_plug *plug) | 
|  | 2818 | { | 
|  | 2819 | struct task_struct *tsk = current; | 
|  | 2820 |  | 
|  | 2821 | plug->magic = PLUG_MAGIC; | 
|  | 2822 | INIT_LIST_HEAD(&plug->list); | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2823 | INIT_LIST_HEAD(&plug->cb_list); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2824 | plug->should_sort = 0; | 
|  | 2825 |  | 
|  | 2826 | /* | 
|  | 2827 | * If this is a nested plug, don't actually assign it. It will be | 
|  | 2828 | * flushed on its own. | 
|  | 2829 | */ | 
|  | 2830 | if (!tsk->plug) { | 
|  | 2831 | /* | 
|  | 2832 | * Store ordering should not be needed here, since a potential | 
|  | 2833 | * preempt will imply a full memory barrier | 
|  | 2834 | */ | 
|  | 2835 | tsk->plug = plug; | 
|  | 2836 | } | 
|  | 2837 | } | 
|  | 2838 | EXPORT_SYMBOL(blk_start_plug); | 
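/*
 * The intended on-stack usage pattern, sketched with two bios that are
 * assumed to be fully set up by the caller:
 */
static void my_submit_pair(struct bio *bio1, struct bio *bio2)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	submit_bio(READ, bio1);		/* both bios collect in current->plug */
	submit_bio(READ, bio2);
	blk_finish_plug(&plug);		/* sort, merge and dispatch the batch */
}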
|  | 2839 |  | 
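/*
 * list_sort() comparator: order plugged requests by queue, so requests
 * for the same queue become adjacent and can be dispatched together.
 */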
|  | 2840 | static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) | 
|  | 2841 | { | 
|  | 2842 | struct request *rqa = container_of(a, struct request, queuelist); | 
|  | 2843 | struct request *rqb = container_of(b, struct request, queuelist); | 
|  | 2844 |  | 
| Konstantin Khlebnikov | f83e826 | 2011-04-04 00:15:02 +0200 | [diff] [blame] | 2845 | return !(rqa->q <= rqb->q); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2846 | } | 
|  | 2847 |  | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2848 | /* | 
|  | 2849 | * If 'from_schedule' is true, then postpone the dispatch of requests | 
|  | 2850 | * until a safe kblockd context. We do this to avoid a large, accidental | 
|  | 2851 | * increase in stack usage during driver dispatch, in places where the | 
|  | 2852 | * original plugger did not intend it. | 
|  | 2853 | */ | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 2854 | static void queue_unplugged(struct request_queue *q, unsigned int depth, | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2855 | bool from_schedule) | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2856 | __releases(q->queue_lock) | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2857 | { | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2858 | trace_block_unplug(q, depth, !from_schedule); | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2859 |  | 
|  | 2860 | /* | 
| Tejun Heo | 8ba6143 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 2861 | * Don't mess with a dead queue. | 
|  | 2862 | */ | 
|  | 2863 | if (unlikely(blk_queue_dead(q))) { | 
|  | 2864 | spin_unlock(q->queue_lock); | 
|  | 2865 | return; | 
|  | 2866 | } | 
|  | 2867 |  | 
|  | 2868 | /* | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2869 | * If we are punting this to kblockd, then we can safely drop | 
|  | 2870 | * the queue_lock before waking kblockd (which needs to take | 
|  | 2871 | * this lock). | 
|  | 2872 | */ | 
|  | 2873 | if (from_schedule) { | 
|  | 2874 | spin_unlock(q->queue_lock); | 
| Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 2875 | blk_run_queue_async(q); | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2876 | } else { | 
| Christoph Hellwig | 24ecfbe | 2011-04-18 11:41:33 +0200 | [diff] [blame] | 2877 | __blk_run_queue(q); | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2878 | spin_unlock(q->queue_lock); | 
|  | 2879 | } | 
|  | 2880 |  | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2881 | } | 
|  | 2882 |  | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2883 | static void flush_plug_callbacks(struct blk_plug *plug) | 
|  | 2884 | { | 
|  | 2885 | LIST_HEAD(callbacks); | 
|  | 2886 |  | 
|  | 2887 | if (list_empty(&plug->cb_list)) | 
|  | 2888 | return; | 
|  | 2889 |  | 
|  | 2890 | list_splice_init(&plug->cb_list, &callbacks); | 
|  | 2891 |  | 
|  | 2892 | while (!list_empty(&callbacks)) { | 
|  | 2893 | struct blk_plug_cb *cb = list_first_entry(&callbacks, | 
|  | 2894 | struct blk_plug_cb, | 
|  | 2895 | list); | 
|  | 2896 | list_del(&cb->list); | 
|  | 2897 | cb->callback(cb); | 
|  | 2898 | } | 
|  | 2899 | } | 
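/*
 * There is no registration helper for plug callbacks at this point in
 * the API; a user embeds a struct blk_plug_cb and links it onto the
 * current task's plug itself.  A hedged sketch with invented names,
 * assuming @mcb was kmalloc()'d by the caller:
 */
struct my_plug_cb {
	struct blk_plug_cb cb;	/* provides ->list and ->callback */
};

static void my_unplug_cb(struct blk_plug_cb *cb)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	/* runs when the plug is flushed; the callback owns @mcb now */
	kfree(mcb);
}

static int my_register_plug_cb(struct my_plug_cb *mcb)
{
	struct blk_plug *plug = current->plug;

	if (!plug)
		return -ENOENT;	/* no plug in progress */
	mcb->cb.callback = my_unplug_cb;
	list_add(&mcb->cb.list, &plug->cb_list);
	return 0;
}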
|  | 2900 |  | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2901 | void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2902 | { | 
|  | 2903 | struct request_queue *q; | 
|  | 2904 | unsigned long flags; | 
|  | 2905 | struct request *rq; | 
| NeilBrown | 109b812 | 2011-04-11 14:13:10 +0200 | [diff] [blame] | 2906 | LIST_HEAD(list); | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2907 | unsigned int depth; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2908 |  | 
|  | 2909 | BUG_ON(plug->magic != PLUG_MAGIC); | 
|  | 2910 |  | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 2911 | flush_plug_callbacks(plug); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2912 | if (list_empty(&plug->list)) | 
|  | 2913 | return; | 
|  | 2914 |  | 
| NeilBrown | 109b812 | 2011-04-11 14:13:10 +0200 | [diff] [blame] | 2915 | list_splice_init(&plug->list, &list); | 
|  | 2916 |  | 
|  | 2917 | if (plug->should_sort) { | 
|  | 2918 | list_sort(NULL, &list, plug_rq_cmp); | 
|  | 2919 | plug->should_sort = 0; | 
|  | 2920 | } | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2921 |  | 
|  | 2922 | q = NULL; | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2923 | depth = 0; | 
| Jens Axboe | 1881127 | 2011-04-12 10:11:24 +0200 | [diff] [blame] | 2924 |  | 
|  | 2925 | /* | 
|  | 2926 | * Save and disable interrupts here, to avoid doing it for every | 
|  | 2927 | * queue lock we have to take. | 
|  | 2928 | */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2929 | local_irq_save(flags); | 
| NeilBrown | 109b812 | 2011-04-11 14:13:10 +0200 | [diff] [blame] | 2930 | while (!list_empty(&list)) { | 
|  | 2931 | rq = list_entry_rq(list.next); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2932 | list_del_init(&rq->queuelist); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2933 | BUG_ON(!rq->q); | 
|  | 2934 | if (rq->q != q) { | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2935 | /* | 
|  | 2936 | * This drops the queue lock | 
|  | 2937 | */ | 
|  | 2938 | if (q) | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2939 | queue_unplugged(q, depth, from_schedule); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2940 | q = rq->q; | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2941 | depth = 0; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2942 | spin_lock(q->queue_lock); | 
|  | 2943 | } | 
| Tejun Heo | 8ba6143 | 2011-12-14 00:33:37 +0100 | [diff] [blame] | 2944 |  | 
|  | 2945 | /* | 
|  | 2946 | * Short-circuit if @q is dead | 
|  | 2947 | */ | 
|  | 2948 | if (unlikely(blk_queue_dead(q))) { | 
|  | 2949 | __blk_end_request_all(rq, -ENODEV); | 
|  | 2950 | continue; | 
|  | 2951 | } | 
|  | 2952 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2953 | /* | 
|  | 2954 | * rq is already accounted, so use raw insert | 
|  | 2955 | */ | 
| Jens Axboe | 401a18e | 2011-03-25 16:57:52 +0100 | [diff] [blame] | 2956 | if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) | 
|  | 2957 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); | 
|  | 2958 | else | 
|  | 2959 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); | 
| Jens Axboe | 94b5eb2 | 2011-04-12 10:12:19 +0200 | [diff] [blame] | 2960 |  | 
|  | 2961 | depth++; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2962 | } | 
|  | 2963 |  | 
| Jens Axboe | 99e2259 | 2011-04-18 09:59:55 +0200 | [diff] [blame] | 2964 | /* | 
|  | 2965 | * This drops the queue lock | 
|  | 2966 | */ | 
|  | 2967 | if (q) | 
| Jens Axboe | 49cac01 | 2011-04-16 13:51:05 +0200 | [diff] [blame] | 2968 | queue_unplugged(q, depth, from_schedule); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2969 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2970 | local_irq_restore(flags); | 
|  | 2971 | } | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2972 |  | 
|  | 2973 | void blk_finish_plug(struct blk_plug *plug) | 
|  | 2974 | { | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 2975 | blk_flush_plug_list(plug, false); | 
| Christoph Hellwig | 88b996c | 2011-04-15 15:20:10 +0200 | [diff] [blame] | 2976 |  | 
|  | 2977 | if (plug == current->plug) | 
|  | 2978 | current->plug = NULL; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 2979 | } | 
|  | 2980 | EXPORT_SYMBOL(blk_finish_plug); | 
|  | 2981 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2982 | int __init blk_dev_init(void) | 
|  | 2983 | { | 
| Nikanth Karthikesan | 9eb55b0 | 2009-04-27 14:53:54 +0200 | [diff] [blame] | 2984 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 
|  | 2985 | sizeof(((struct request *)0)->cmd_flags)); | 
|  | 2986 |  | 
| Tejun Heo | 89b90be | 2011-01-03 15:01:47 +0100 | [diff] [blame] | 2987 | /* used for unplugging and affects IO latency/throughput - HIGHPRI */ | 
|  | 2988 | kblockd_workqueue = alloc_workqueue("kblockd", | 
|  | 2989 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2990 | if (!kblockd_workqueue) | 
|  | 2991 | panic("Failed to create kblockd\n"); | 
|  | 2992 |  | 
|  | 2993 | request_cachep = kmem_cache_create("blkdev_requests", | 
| Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2994 | sizeof(struct request), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2995 |  | 
| Jens Axboe | 8324aa9 | 2008-01-29 14:51:59 +0100 | [diff] [blame] | 2996 | blk_requestq_cachep = kmem_cache_create("blkdev_queue", | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 2997 | sizeof(struct request_queue), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2998 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2999 | return 0; | 
|  | 3000 | } |