/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

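/*
 * Walk all bio_vecs of a bio chain and count the physical segments it maps
 * to, honouring the queue's max segment size, segment boundary and bounce
 * limits.  Also records the leading/trailing segment sizes used when
 * checking whether two requests can share a segment across a merge.
 */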
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

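/* Recompute the physical segment count of a request's bio chain. */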
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

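/*
 * Recount the physical segments of a single bio (temporarily unlinking it
 * from its chain so only this bio is counted) and mark its segment count
 * as valid.
 */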
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

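/*
 * Check whether the last segment of @bio and the first segment of @nxt can
 * be folded into a single physical segment without violating the queue's
 * clustering, segment size and segment boundary limits.
 */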
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

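/*
 * Add one bio_vec to the scatterlist being built: extend the current sg
 * entry when clustering is enabled and the queue limits permit it,
 * otherwise start a new entry.
 */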
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}

/*
 * Map a request to a scatterlist, returning the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries.
 *
 * Will return the number of sg entries set up.
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct scatterlist *sg;
	int nsegs, cluster;
	unsigned long i;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	bvprv = NULL;
	sg = NULL;
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	} /* segments in bio */

	if (sg)
		sg_mark_end(sg);

	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

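/*
 * Account for the physical segments @bio would add to @req, failing the
 * merge if the queue's segment limit would be exceeded or the integrity
 * data cannot be merged.
 */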
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

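/*
 * Check whether @bio can be appended to the back of @req without exceeding
 * the request size and segment limits.
 */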
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

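/*
 * Check whether @bio can be prepended to the front of @req without
 * exceeding the request size and segment limits.
 */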
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

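/*
 * Check whether @req and @next can be combined into a single request,
 * updating the segment accounting of @req if the merge is allowed.
 */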
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

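/* Drop the in-flight accounting for a request that is being merged away. */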
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

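/* Try to merge @rq with the request that follows it in the elevator. */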
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

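/* Try to merge @rq with the request that precedes it in the elevator. */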
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

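/* Try to merge @rq and @next into a single request. */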
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

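/*
 * Check whether @bio is a candidate for merging into @rq at all: same data
 * direction, same device, compatible merge flags and matching integrity and
 * write-same properties.
 */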
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

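/*
 * Decide whether @bio would back merge, front merge or not merge with @rq,
 * based purely on sector adjacency.
 */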
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}