// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

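	/*
	 * blk_queue_bounce() may replace *bio with a bounce clone when the
	 * original pages sit beyond the device's addressable range;
	 * orig_bio remembers the caller's bio so it can be restored if the
	 * merge below fails.
	 */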
	blk_queue_bounce(rq->q, bio);

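	/*
	 * Count the bio's physical segments so that the same figure feeds
	 * both blk_rq_bio_prep() and the back-merge check below.
	 */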
	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

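/*
 * A user-mapped bio pins user pages that must be released; a copied bio
 * owns a kernel bounce copy that may still need to be written back to
 * user space.  Undo whichever variant was used to set the bio up.
 */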
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

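	/*
	 * The bio allocated above does not know the request's direction,
	 * so stamp the request's operation onto it.
	 */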
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

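	/*
	 * Fall back to copying when the caller supplied its own pages via
	 * map_data, when the iovecs violate the queue's DMA alignment or
	 * padding constraints, or when gaps between iovecs would break the
	 * device's virt boundary; otherwise map the user pages directly.
	 */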
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

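	/*
	 * A single bio holds a limited number of vecs, so one call to
	 * __blk_rq_map_user_iov() may consume only part of the iterator;
	 * keep appending bios until the whole iovec has been mapped.
	 */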
	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
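
/*
 * Illustrative sketch (not part of this file): one plausible shape of a
 * passthrough caller, assuming a live request_queue *q and a user pointer
 * ubuf of len bytes, with error handling elided.  blk_get_request(),
 * blk_execute_rq() and blk_put_request() are the usual passthrough
 * helpers of this kernel generation:
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	struct bio *bio;
 *
 *	blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	bio = rq->bio;			(save it: completion may change rq->bio)
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_rq_unmap_user(bio);		(must still be in process context)
 *	blk_put_request(rq);
 */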

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, as saved when blk_rq_map_user() returned,
 *    since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

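	/*
	 * For a bounced bio, the bio on the request is the bounce clone;
	 * the original user-mapped/copied bio was stashed in ->bi_private
	 * by the bounce code, and that is the one to unmap.
	 */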
	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

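	/*
	 * Buffers that violate the queue's alignment constraints, or that
	 * live on the stack (which may be vmapped and thus not physically
	 * contiguous), cannot be mapped directly and are copied instead.
	 */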
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
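
/*
 * Illustrative sketch (not part of this file): mapping a kernel buffer,
 * assuming a live request_queue *q and a heap buffer kbuf of len bytes.
 * No explicit unmap step is needed; cleanup happens via bio completion:
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
 *
 *	blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */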