/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

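/*
 * blk_rq_append_bio - append a bio to a request
 *
 * The first bio initialises the request via blk_rq_bio_prep(); later bios
 * are only accepted if ll_back_merge_fn() says the request may still grow,
 * in which case the bio is linked onto the tail and the request's data
 * length is extended.  Returns 0 on success or -EINVAL if the bio cannot
 * be added.
 */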
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

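/*
 * __blk_rq_map_user_iov - map or copy one chunk of a user iov_iter
 *
 * Builds a single bio from @iter (pinning the user pages, or copying them
 * into a bounce buffer when @copy is set), advances the iterator by however
 * much fitted into the bio, and appends the bio to @rq.  The caller loops
 * until the iterator is drained.
 */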
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

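/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * passthrough-style caller that maps a user iovec into a request and later
 * unmaps it.  The helper name pt_submit_iov() and its arguments are made up
 * for this sketch; real users of this interface include the SG_IO paths.
 * The iovec array lives in kernel memory while its iov_base pointers refer
 * to user memory, which is what iov_iter_init() expects here.
 */
static int pt_submit_iov(struct request_queue *q, struct gendisk *disk,
			 const struct iovec *iov, unsigned long nr_segs,
			 size_t total_len, bool to_device)
{
	struct iov_iter iter;
	struct request *rq;
	struct bio *bio;
	int ret;

	iov_iter_init(&iter, to_device ? WRITE : READ, iov, nr_segs,
		      total_len);

	rq = blk_get_request(q, to_device ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	if (ret)
		goto out;

	bio = rq->bio;			/* save the original bio for unmapping */
	blk_execute_rq(q, disk, rq, 0);	/* command setup/error handling elided */
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}
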
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

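/*
 * Example (illustrative sketch only, not part of this file): the pairing
 * described above for a single flat user buffer.  blk_rq_map_user() is the
 * single-segment convenience wrapper; rq->bio is saved before the request
 * runs, because completion may change rq->bio and blk_rq_unmap_user() needs
 * the original head of the bio list.  The helper name pt_rw_buf() is made
 * up for this sketch.
 */
static int pt_rw_buf(struct request_queue *q, struct gendisk *disk,
		     void __user *ubuf, unsigned long len, bool to_device)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, to_device ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (!ret) {
		bio = rq->bio;			/* original bio */
		blk_execute_rq(q, disk, rq, 0);	/* cmd setup/errors elided */
		ret = blk_rq_unmap_user(bio);
	}

	blk_put_request(rq);
	return ret;
}
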
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
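
/*
 * Example (illustrative sketch only, not part of this file): issuing a
 * passthrough request backed by a kernel buffer, roughly what the SCSI
 * ioctl helpers do.  The helper name pt_kern_cmd() is made up for this
 * sketch; blk_rq_map_kern() copies the data itself if @kbuf is unaligned
 * or on the stack, so the caller needs no manual bouncing.
 */
static int pt_kern_cmd(struct request_queue *q, struct gendisk *disk,
		       void *kbuf, unsigned int len, bool to_device)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, to_device ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);	/* cmd setup/errors elided */

	blk_put_request(rq);
	return ret;
}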