/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
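
/*
 * Illustrative sketch (not part of this file, assumptions marked): how a
 * caller that builds its own bios might use blk_rq_append_bio() to gather
 * two kernel buffers into one REQ_TYPE_BLOCK_PC request. The function and
 * buffer names are hypothetical, and error unwinding of the first bio on
 * a second-map failure is elided for brevity.
 */
#if 0
static int example_append_two_buffers(struct request_queue *q,
				      struct request *rq,
				      void *buf1, unsigned int len1,
				      void *buf2, unsigned int len2)
{
	struct bio *bio;
	int ret;

	bio = bio_map_kern(q, buf1, len1, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);
	/* first bio becomes rq->bio via blk_rq_bio_prep() */
	ret = blk_rq_append_bio(q, rq, bio);
	if (ret)
		return ret;

	bio = bio_map_kern(q, buf2, len2, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);
	/* second bio is back-merged onto rq->biotail if the queue allows */
	return blk_rq_append_bio(q, rq, bio);
}
#endif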

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA; otherwise set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if the bio was bounced, we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @ubuf: the user buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens, we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
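
/*
 * Illustrative sketch (not part of this file, assumptions marked): the
 * usual calling pattern for blk_rq_map_user() when servicing an
 * SG_IO-style ioctl. Command setup is elided and all names below are
 * hypothetical. Note that rq->bio is saved before blk_execute_rq(),
 * since completion may change it, and the saved pointer is what gets
 * handed to blk_rq_unmap_user() afterwards, still in process context.
 */
#if 0
static int example_user_io(struct request_queue *q, struct gendisk *disk,
			   void __user *ubuf, unsigned long len, int write)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* ... fill in rq->cmd[], rq->cmd_len, rq->timeout here ... */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	bio = rq->bio;			/* keep the original bio list head */
	blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);	/* matching unmap */
out:
	blk_put_request(rq);
	return ret;
}
#endif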

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
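
/*
 * Illustrative sketch (not part of this file, assumptions marked): mapping
 * a scattered user buffer with blk_rq_map_user_iov(). Here "iov" would
 * typically be a struct sg_iovec array copied in from an SG_IO request;
 * the total byte count passed as "len" must match the sum of the iovec
 * lengths, or the mapping is rejected with -EINVAL (the bi_size check
 * above). The function name is hypothetical.
 */
#if 0
static int example_map_iovec(struct request_queue *q, struct request *rq,
			     struct sg_iovec *iov, int iov_count)
{
	unsigned int len = 0;
	int i;

	for (i = 0; i < iov_count; i++)
		len += iov[i].iov_len;

	return blk_rq_map_user_iov(q, rq, NULL, iov, iov_count, len,
				   GFP_KERNEL);
}
#endif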

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
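
/*
 * Illustrative fragment (not part of this file, assumptions marked): why
 * the original bio must be saved across request execution. rq->bio may be
 * advanced or cleared by I/O completion, so the head of the mapped bio
 * list is snapshotted before the request is started. The surrounding
 * variables are hypothetical.
 */
#if 0
	struct bio *orig_bio = rq->bio;	/* snapshot before execution */

	blk_execute_rq(q, disk, rq, 0);

	/* rq->bio is no longer trustworthy here; unmap via the snapshot */
	if (blk_rq_unmap_user(orig_bio))
		printk(KERN_WARNING "example: unmap failed\n");
#endif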

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr;
	unsigned int alignment;
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	kaddr = (unsigned long)kbuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	do_copy = ((kaddr & alignment) || (len & alignment) ||
		   object_is_on_stack(kbuf));

	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
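
/*
 * Illustrative sketch (not part of this file, assumptions marked): issuing
 * a command on a kmalloc'ed buffer via blk_rq_map_kern(). Stack buffers
 * are also accepted, but the object_is_on_stack() check above forces a
 * copy for them, since they cannot be DMA-mapped safely. Command setup is
 * elided and the function name is hypothetical.
 */
#if 0
static int example_kernel_io(struct request_queue *q, struct gendisk *disk,
			     unsigned int len)
{
	struct request *rq;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq) {
		kfree(buf);
		return -ENOMEM;
	}
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* ... fill in rq->cmd[], rq->cmd_len, rq->timeout here ... */

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	kfree(buf);
	return ret;
}
#endif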