/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
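
/*
 * Illustrative sketch, not part of this file: a driver that builds its
 * own bios for a passthrough request appends each one in turn, and a
 * -EINVAL return means the bio could not be merged within the queue
 * limits.  The request and bio variables here are hypothetical:
 *
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (ret)
 *		goto fail_put_bio;
 */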

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
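
/*
 * Illustrative sketch of a caller, assuming an SG_IO-style ioctl path
 * ("uiov" and "nr_segs" are hypothetical): build an iov_iter from the
 * user iovec array, then map it in one call.
 *
 *	struct iovec *iov = NULL;
 *	struct iov_iter i;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uiov, nr_segs, 0, &iov, &i);
 *	if (ret < 0)
 *		return ret;
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *	kfree(iov);
 */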

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
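
/*
 * Illustrative sketch: for a single contiguous user buffer no iovec is
 * needed; "ubuf" and "len" are hypothetical values from a caller's
 * ioctl argument, and a NULL map_data lets the bio layer allocate any
 * pages itself:
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_put_request;
 */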

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, as saved when blk_rq_map_user() returned,
 *    since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
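
/*
 * Illustrative sketch of the unmap discipline documented above: save
 * rq->bio immediately after mapping, since completion may change it by
 * the time the caller unmaps.  blk_execute_rq() stands in for whatever
 * synchronous dispatch the caller uses; exact signatures vary across
 * kernel versions:
 *
 *	bio = rq->bio;			saved right after mapping
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);	not blk_rq_unmap_user(rq->bio)
 */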

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of the data in @kbuf
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
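
/*
 * Illustrative sketch, assuming a driver issuing an internal passthrough
 * command ("buf" and "cmd_len" are hypothetical): a kmalloc'ed buffer
 * that meets the queue's alignment maps zero-copy, while stack or
 * misaligned buffers are transparently copied through bio_copy_kern():
 *
 *	buf = kmalloc(cmd_len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = blk_rq_map_kern(q, rq, buf, cmd_len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free;
 */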