/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_iter.bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
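
#if 0
/*
 * Usage sketch for blk_rq_map_user(): a minimal REQ_TYPE_BLOCK_PC caller
 * that maps a user buffer, executes the request and unmaps it again.
 * The queue, gendisk, user buffer, length and direction are assumed to be
 * supplied and validated by the caller (as in an SG_IO style ioctl); the
 * command bytes in rq->cmd and detailed error handling are omitted.
 */
static int example_map_user_io(struct request_queue *q, struct gendisk *disk,
			       void __user *ubuf, unsigned long len, int write)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR_OR_NULL(rq))
		return rq ? PTR_ERR(rq) : -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* save the original bio; completion may advance rq->bio */
	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);

	ret = blk_rq_unmap_user(bio);
	blk_put_request(rq);
	return ret;
}
#endif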

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, const struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_iter.bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
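
#if 0
/*
 * Usage sketch for blk_rq_map_user_iov(): attaching a user scatter list
 * described by an sg_iovec array to an already allocated request, as the
 * SG_IO path does. The iovec array, its element count and the total byte
 * count are assumed to have been copied from user space and validated by
 * the caller; whether the pages are mapped directly or copied is decided
 * internally from the queue's DMA alignment.
 */
static int example_map_user_iov(struct request_queue *q, struct request *rq,
				const struct sg_iovec *iov, int iov_count,
				unsigned int len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, iov, iov_count, len,
				  GFP_KERNEL);
	if (ret)
		return ret;

	/* save rq->bio now; it is what blk_rq_unmap_user() must be given */
	bio = rq->bio;
	blk_execute_rq(q, rq->rq_disk, rq, 0);
	return blk_rq_unmap_user(bio);
}
#endif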

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
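
#if 0
/*
 * Usage sketch: the return value of blk_rq_unmap_user() reports copy-back
 * failures from bounced or copied bios, so callers should fold it into the
 * overall command status instead of discarding it. @rq is the executed
 * request and @bio the rq->bio that was saved right after mapping.
 */
static int example_unmap_status(struct request *rq, struct bio *bio)
{
	int result = rq->errors ? -EIO : 0;

	if (blk_rq_unmap_user(bio) && !result)
		result = -EFAULT;

	blk_put_request(rq);
	return result;
}
#endif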

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
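
#if 0
/*
 * Usage sketch for blk_rq_map_kern(): attaching a kernel buffer to a
 * REQ_TYPE_BLOCK_PC request before executing it, roughly what helpers such
 * as scsi_execute() do. The queue, gendisk, command bytes and buffer are
 * assumed to come from the caller; a stack buffer is acceptable since the
 * function falls back to bio_copy_kern() for unaligned or on-stack data.
 */
static int example_map_kern_io(struct request_queue *q, struct gendisk *disk,
			       void *buf, unsigned int buflen, int write)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_NOIO);
	if (IS_ERR_OR_NULL(rq))
		return rq ? PTR_ERR(rq) : -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_NOIO);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	blk_execute_rq(q, disk, rq, 0);
	ret = rq->errors ? -EIO : 0;
	blk_put_request(rq);
	return ret;
}
#endif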