/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

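/*
 * Append @bio to @rq: start a fresh bio list if @rq is empty, otherwise
 * try to merge @bio onto the tail of the existing list.  Returns -EINVAL
 * if the queue's merge rules reject the new segment.
 */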
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

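/*
 * Undo the mapping for a single bio: unpin the user pages of a directly
 * mapped bio, or copy back and free the bounce buffer of a copied one.
 */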
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
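
/*
 * A minimal sketch (illustration only, not part of this file) of how a
 * SCSI-passthrough style handler might feed a user iovec into a request.
 * The function name and error handling are hypothetical assumptions;
 * import_iovec() is from the same iov_iter series as the
 * import_single_range() call used below in blk_rq_map_user().
 */
#if 0
static int example_submit_user_iov(struct request_queue *q,
				   struct request *rq,
				   const struct iovec __user *uvec,
				   unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	int ret;

	/* copy in and validate the user iovec, building an iterator */
	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* NULL if the on-stack fast array was used */
	return ret;	/* on success, later unmap via blk_rq_unmap_user() */
}
#endif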

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller
 *    must supply the original rq->bio, as saved after blk_rq_map_user()
 *    returned, since the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
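
/*
 * A minimal sketch (illustration only) of the map/execute/unmap pairing
 * the comment above describes: rq->bio must be saved *before* the request
 * runs, because completion may advance or clear it.  The function name is
 * a hypothetical assumption; the save-then-unmap discipline mirrors what
 * callers such as sg_io() follow.
 */
#if 0
static int example_map_execute_unmap(struct request_queue *q,
				     struct gendisk *disk,
				     struct request *rq,
				     void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;			/* save the original bio list */
	blk_execute_rq(q, disk, rq, 0);	/* completion may change rq->bio */

	return blk_rq_unmap_user(bio);	/* pass back the saved original */
}
#endif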

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
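
/*
 * A minimal sketch (illustration only) of driving a passthrough command
 * with a kernel buffer.  Command setup is elided and the function name is
 * a hypothetical assumption; note the heap allocation: blk_rq_map_kern()
 * falls back to a bounce copy for stack or otherwise unaligned buffers.
 */
#if 0
static int example_map_kern(struct request_queue *q, struct gendisk *disk,
			    struct request *rq, unsigned int len)
{
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);	/* heap, so direct mapping is possible */
	if (!buf)
		return -ENOMEM;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		goto out;

	ret = blk_execute_rq(q, disk, rq, 0);
out:
	kfree(buf);
	return ret;
}
#endif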