/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
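
/*
 * Worked example of the alignment math above (hypothetical limits, for
 * illustration only): with discard_granularity = 64 sectors,
 * discard_alignment = 0 and max_discard_sectors = 1024, a discard bio
 * starting at sector 100 is first capped at split_sectors = 1024. The
 * remainder bio would then start at sector 100 + 1024 = 1124, which is
 * not a multiple of the 64-sector granularity, so:
 *
 *	tmp = (100 + 1024 - 0) % 64 = 1124 % 64 = 36
 *	split_sectors = 1024 - 36 = 988
 *
 * and the split instead ends at sector 100 + 988 = 1088 = 17 * 64,
 * leaving the remainder aligned to the discard granularity.
 */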

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;

	bio_for_each_segment(bv, bio, iter) {
		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bv;
			sectors += bv.bv_len >> 9;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}
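
/*
 * Illustration of the loop above (hypothetical limits): with
 * queue_max_sectors = 256 and 4k bio vectors, "sectors" grows by 8 per
 * vector until adding another vector would exceed 256, at which point
 * the code jumps to the split label and bio_split() peels off a front
 * bio covering exactly the sectors accumulated so far;
 * blk_queue_split() below resubmits the remainder.
 */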

void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_rw |= REQ_NOMERGE;

		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);
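
/*
 * Sketch of a typical caller (not part of this file): a bio-based or
 * blk-mq driver splits an incoming bio against the queue limits at the
 * top of its make_request function, after which "bio" fits the limits
 * and any remainder has already been resubmitted via
 * generic_make_request() by blk_queue_split() itself. The names my_dev
 * and my_handle_bio are hypothetical, and the make_request return type
 * varies across kernel versions:
 *
 *	static void my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		struct my_dev *dev = q->queuedata;
 *
 *		blk_queue_split(q, &bio, q->bio_split);
 *		my_handle_bio(dev, bio);
 *	}
 */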

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * may attach a payload to a discard bio, and that payload has to be
	 * counted as a single segment (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
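
/*
 * Sketch of a typical blk_rq_map_sg() caller (not part of this file):
 * a driver maps the request onto a scatterlist sized by
 * rq->nr_phys_segments and then hands it to the DMA API. The names
 * my_dev, dev->sgl and dev->dma_dev are hypothetical:
 *
 *	static int my_map_request(struct my_dev *dev, struct request *rq)
 *	{
 *		int nents;
 *
 *		sg_init_table(dev->sgl, rq->nr_phys_segments);
 *		nents = blk_rq_map_sg(rq->q, rq, dev->sgl);
 *		if (!nents)
 *			return -EIO;
 *
 *		return dma_map_sg(dev->dma_dev, dev->sgl, nents,
 *				  rq_data_dir(rq) == WRITE ?
 *				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *	}
 */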

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request. Can't merge them if it is.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
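
/*
 * Example of the accounting above (hypothetical values): if req ends in
 * a 4k segment and next begins in a 4k segment that is physically
 * contiguous with it, blk_phys_contig_segment() succeeds, the two
 * boundary segments collapse into one 8k segment, total_phys_segments
 * drops by one, and the 8k seg_size is recorded in bi_seg_front_size /
 * bi_seg_back_size so that later merges still honour
 * queue_max_segment_size().
 */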

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
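
/*
 * The positional checks above, illustrated (hypothetical numbers): a
 * request covering sectors [1000, 1008) back-merges with a bio whose
 * bi_sector is 1008 (1000 + 8 == 1008), and front-merges with an
 * 8-sector bio whose bi_sector is 992 (1000 - 8 == 992); anything else
 * is ELEVATOR_NO_MERGE.
 */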