/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

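/*
 * blk_rq_append_bio - append a bio to a request
 *
 * Starts the request's bio chain if it is still empty; otherwise the bio
 * is merged onto the tail, provided ll_back_merge_fn() confirms the
 * queue's merge limits still hold, and rq->data_len grows by bio->bi_size.
 */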
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->data_len += bio->bi_size;
        }
        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

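/*
 * Map a single chunk of up to @len bytes at @ubuf into @rq, bouncing it
 * through kernel pages if the user address or length is not suitably
 * aligned.  Returns the number of bytes added to the request, or a
 * negative errno on failure.
 */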
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * if alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
        if (!(uaddr & queue_dma_alignment(q)) &&
            !(len & queue_dma_alignment(q)))
                bio = bio_map_user(q, NULL, uaddr, len, reading);
        else
                bio = bio_copy_user(q, uaddr, len, reading);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    void __user *ubuf, unsigned long len)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;
        }

        /*
         * __blk_rq_map_user() copies the buffers if starting address
         * or length isn't aligned.  As the copied buffer is always
         * page aligned, we know that there's enough room for padding.
         * Extend the last bio and update rq->data_len accordingly.
         *
         * On unmap, bio_uncopy_user() will use unmodified
         * bio_map_data pointed to by bio->bi_private.
         */
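        /*
         * Worked example (illustrative only): with a dma alignment mask of
         * 511 and len == 520, pad_len == (511 & ~520) + 1 == 504, which
         * extends the 520 byte transfer to the next 512 byte boundary.
         */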
        if (len & queue_dma_alignment(q)) {
                unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
                struct bio *bio = rq->biotail;

                bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
                bio->bi_size += pad_len;
        }

        rq->buffer = rq->data = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
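
/*
 * Illustrative sketch, not part of this file: a REQ_BLOCK_PC style caller
 * (along the lines of the SG_IO path) would typically pair blk_rq_map_user()
 * with blk_rq_unmap_user() roughly as below.  The queue, disk, user buffer
 * and length are assumed to be supplied by the caller, error handling is
 * trimmed, and rq->bio is saved before execution because completion may
 * change it:
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(q, rq, ubuf, len);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 *	bio = rq->bio;
 *	blk_execute_rq(q, disk, rq, 0);
 *
 *	ret = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 */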

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct sg_iovec *iov, int iov_count, unsigned int len)
{
        struct bio *bio;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        /* we don't allow misaligned data like bio_map_user() does. If the
         * user is using sg, they're expected to know the alignment constraints
         * and respect them accordingly */
        bio = bio_map_user_iov(q, NULL, iov, iov_count,
                               rq_data_dir(rq) == READ);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
        }

        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
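
/*
 * Illustrative sketch, not part of this file: a caller holding a user iovec
 * (as the SG_IO ioctl does) hands the whole vector to blk_rq_map_user_iov()
 * in one go.  The two-segment iovec below is hypothetical; @len must equal
 * the sum of the segment lengths, and the segments are expected to respect
 * the queue's dma alignment since misaligned data is not bounced here:
 *
 *	struct sg_iovec iov[2];
 *
 *	iov[0].iov_base = ubuf;
 *	iov[0].iov_len  = 512;
 *	iov[1].iov_base = ubuf + 512;
 *	iov[1].iov_len  = 512;
 *
 *	ret = blk_rq_map_user_iov(q, rq, iov, 2, 1024);
 */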
217
218/**
219 * blk_rq_unmap_user - unmap a request with user data
220 * @bio: start of bio list
221 *
222 * Description:
223 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
224 * supply the original rq->bio from the blk_rq_map_user() return, since
225 * the io completion may have changed rq->bio.
226 */
227int blk_rq_unmap_user(struct bio *bio)
228{
229 struct bio *mapped_bio;
230 int ret = 0, ret2;
231
232 while (bio) {
233 mapped_bio = bio;
234 if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
235 mapped_bio = bio->bi_private;
236
237 ret2 = __blk_rq_unmap_user(mapped_bio);
238 if (ret2 && !ret)
239 ret = ret2;
240
241 mapped_bio = bio;
242 bio = bio->bi_next;
243 bio_put(mapped_bio);
244 }
245
246 return ret;
247}
Jens Axboe86db1e22008-01-29 14:53:40 +0100248EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        bio = bio_map_kern(q, kbuf, len, gfp_mask);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
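
/*
 * Illustrative sketch, not part of this file: blk_rq_map_kern() is the
 * kernel-buffer counterpart, used for things like SCSI command payloads
 * allocated with kmalloc().  No unmap call is needed afterwards; the bio
 * is torn down by normal request completion.  @buffer and @bufflen are
 * assumed to be supplied by the caller:
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	if (bufflen) {
 *		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
 *		if (ret) {
 *			blk_put_request(rq);
 *			return ret;
 *		}
 *	}
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */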