/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

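/*
 * Walk all bios in the chain and count the physical segments, honouring
 * the queue's max segment size, segment boundary and clustering limits.
 * Also records bi_seg_front_size/bi_seg_back_size so adjacent requests
 * can later decide whether their boundary segments may be merged.
 */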
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, high, highprv = 1, no_sg_merge;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
	high = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
			if (!high && !highprv && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv.bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

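/*
 * Recompute bi_phys_segments for a single bio and mark the result valid
 * via BIO_SEG_VALID.  With NO_SG_MERGE set every bvec counts as its own
 * segment, so bi_vcnt can be used directly.
 */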
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
		bio->bi_phys_segments = bio->bi_vcnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
		bio->bi_next = nxt;
	}

	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

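/*
 * Return 1 if the last segment of @bio and the first segment of @nxt are
 * physically contiguous and may be folded into a single segment without
 * violating the queue's segment size and boundary limits.
 */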
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

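/*
 * Add one bvec to the scatterlist being built, either by extending the
 * current sg entry (when clustering and the queue limits allow it) or by
 * starting a new entry.
 */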
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

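/*
 * Map a chain of bios to a scatterlist.  Discard and write-same bios are
 * special-cased to at most one payload segment; everything else goes
 * through __blk_segment_map_sg() one bvec at a time.
 */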
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		/* clear a stale termination bit before appending the drain */
		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs;
	struct bio *next = bio->bi_next;

	bio->bi_next = NULL;
	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
	bio->bi_next = next;
	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

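/*
 * Account the segments a new bio adds to @req, refusing the merge if the
 * queue's segment limit or integrity constraints would be exceeded.
 */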
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

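/*
 * Check whether @bio can be appended to the back of @req without exceeding
 * the request's maximum sector count.
 */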
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

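/*
 * Check whether @bio can be prepended to the front of @req without
 * exceeding the request's maximum sector count.
 */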
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

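/*
 * Decide whether @req and @next can be combined into a single request,
 * updating the segment accounting when their boundary segments merge.
 */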
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

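/*
 * Drop the in-flight accounting for a request that is being merged away.
 */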
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

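/*
 * The two helpers below ask the elevator for the request that follows
 * (or precedes) @rq on disk and try to merge the pair.
 */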
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

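/*
 * Check the basic preconditions for merging @bio into @rq: mergeable flags,
 * matching data direction and device, integrity and write-same constraints,
 * and (when QUEUE_FLAG_SG_GAPS is set) the absence of an SG gap at the
 * boundary between the request's tail bio and the new bio.
 */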
| 569 | bool blk_rq_merge_ok(struct request *rq, struct bio *bio) |
| 570 | { |
Jens Axboe | 66cb45a | 2014-06-24 16:22:24 -0600 | [diff] [blame] | 571 | struct request_queue *q = rq->q; |
| 572 | |
Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 573 | if (!rq_mergeable(rq) || !bio_mergeable(bio)) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 574 | return false; |
| 575 | |
Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 576 | if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw)) |
| 577 | return false; |
| 578 | |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 579 | /* different data direction or already started, don't merge */ |
| 580 | if (bio_data_dir(bio) != rq_data_dir(rq)) |
| 581 | return false; |
| 582 | |
| 583 | /* must be same device and not a special request */ |
Jens Axboe | e7e2450 | 2013-10-29 12:11:47 -0600 | [diff] [blame] | 584 | if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 585 | return false; |
| 586 | |
| 587 | /* only merge integrity protected bio into ditto rq */ |
| 588 | if (bio_integrity(bio) != blk_integrity_rq(rq)) |
| 589 | return false; |
| 590 | |
Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 591 | /* must be using the same buffer */ |
| 592 | if (rq->cmd_flags & REQ_WRITE_SAME && |
| 593 | !blk_write_same_mergeable(rq->bio, bio)) |
| 594 | return false; |
| 595 | |
Jens Axboe | 66cb45a | 2014-06-24 16:22:24 -0600 | [diff] [blame] | 596 | if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { |
| 597 | struct bio_vec *bprev; |
| 598 | |
| 599 | bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; |
| 600 | if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) |
| 601 | return false; |
| 602 | } |
| 603 | |
Tejun Heo | 050c8ea | 2012-02-08 09:19:38 +0100 | [diff] [blame] | 604 | return true; |
| 605 | } |
| 606 | |
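/*
 * Report whether @bio lines up with the end (back merge) or the start
 * (front merge) of @rq on disk.
 */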
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}