/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Return true if appending @cur after @prv would leave a gap relative to
 * the queue's virt_boundary mask, i.e. @cur does not start, or @prv does
 * not end, on the boundary.
 */
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
	bool copy = (q->dma_pad_mask & iter->count) || map_data;
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			copy = true;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
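
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that starts from a user-supplied iovec array, as the SG_IO ioctl
 * path does, typically builds the iov_iter with import_iovec() and then
 * hands it to blk_rq_map_user_iov().  uvec and nr_segs are assumed to come
 * from the caller's own ioctl arguments.
 *
 *	struct iov_iter i;
 *	struct iovec *iov = NULL;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *	kfree(iov);
 */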

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
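
/*
 * Illustrative sketch (editorial addition): the map/unmap pairing described
 * in the comment above.  The caller remembers rq->bio right after mapping,
 * because I/O completion may change rq->bio; the saved pointer is what must
 * be handed back to blk_rq_unmap_user().  The request setup and execution
 * in between are assumed to be done by the surrounding driver code.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *
 *	... set up and execute the request, e.g. via blk_execute_rq() ...
 *
 *	ret = blk_rq_unmap_user(bio);
 */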

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
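
/*
 * Illustrative sketch (editorial addition): mapping a kernel buffer into a
 * pass-through request.  The queue, command setup and execution details are
 * assumptions about the calling driver; only the blk_rq_map_kern() call and
 * its error handling are the point here.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 *	... fill in rq->cmd / rq->cmd_len and call blk_execute_rq() ...
 *
 *	blk_put_request(rq);
 */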