/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

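/*
 * Walk the bios in a request and count the physical segments, fusing
 * adjacent biovecs when the queue allows clustering and the pair stays
 * within the segment size and boundary limits. The sizes of the first
 * and last segments are recorded in bi_seg_front_size/bi_seg_back_size
 * so later request merges can recheck the segment size limit.
 */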
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

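/*
 * Recalculate the physical segment count of @rq after its bio list has
 * changed, e.g. due to a merge or partial completion.
 */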
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

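/*
 * Count the physical segments of a single bio. bi_next is cleared
 * temporarily so the walk stops at this bio, then BIO_SEG_VALID is set
 * to mark the cached count as current.
 */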
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

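/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * are physically contiguous and may be merged into a single segment
 * without violating the queue's segment size and boundary limits.
 */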
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > queue_max_segment_size(q))
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

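/*
 * Illustrative usage sketch for blk_rq_map_sg() (not from this file;
 * "my_dev" and MY_MAX_SEGS are hypothetical driver-side names): a driver
 * typically sizes a scatterlist table from queue_max_segments(), maps
 * the request, then hands the table to the DMA API:
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(my_dev, sgl, nents,
 *			   rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 */
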
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new physical segment. Bump the
	 * segment count.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

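/*
 * Check whether @bio can be appended to @req (a back merge) without
 * exceeding the queue's sector and segment limits.
 */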
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

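/*
 * Check whether @bio can be prepended to @req (a front merge) without
 * exceeding the queue's sector and segment limits.
 */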
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

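/*
 * Check whether @req and @next can be combined into one request. If the
 * two requests join at a pair of segments that blk_phys_contig_segment()
 * can fuse, the combined request needs one segment fewer than the sum.
 */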
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request. Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

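/*
 * @req is going away in a merge; update the disk/partition statistics
 * so it no longer counts as an in-flight request.
 */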
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

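/*
 * Try to merge @rq with the request the elevator has sorted behind it.
 */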
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

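/*
 * Try to merge @rq into the request the elevator has sorted in front
 * of it.
 */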
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}