#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

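/*
 * The iterate_* macros below walk the segments of an iov_iter, invoking
 * the STEP expression once per non-empty segment.  For iovec segments
 * STEP must evaluate to the number of bytes it failed to process (the
 * "left" result of the uaccess primitives), so a partial copy ends the
 * walk early; page-backed bvec segments cannot fault, so that result is
 * discarded.
 */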
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        size_t left; \
        size_t wanted = n; \
        __p = i->iov; \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
                n -= __v.iov_len; \
        } else { \
                left = 0; \
        } \
        while (unlikely(!left && n)) { \
                __p++; \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                        continue; \
                __v.iov_base = __p->iov_base; \
                left = (STEP); \
                __v.iov_len -= left; \
                skip = __v.iov_len; \
                n -= __v.iov_len; \
        } \
        n = wanted - n; \
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
        size_t wanted = n; \
        __p = i->bvec; \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
        if (likely(__v.bv_len)) { \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset + skip; \
                (void)(STEP); \
                skip += __v.bv_len; \
                n -= __v.bv_len; \
        } \
        while (unlikely(n)) { \
                __p++; \
                __v.bv_len = min_t(size_t, n, __p->bv_len); \
                if (unlikely(!__v.bv_len)) \
                        continue; \
                __v.bv_page = __p->bv_page; \
                __v.bv_offset = __p->bv_offset; \
                (void)(STEP); \
                skip = __v.bv_len; \
                n -= __v.bv_len; \
        } \
        n = wanted; \
}

#define iterate_all_kinds(i, n, v, I, B) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
        } \
}

#define iterate_and_advance(i, n, v, I, B) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec; \
                struct bio_vec v; \
                iterate_bvec(i, n, v, bvec, skip, (B)) \
                if (skip == bvec->bv_len) { \
                        bvec++; \
                        skip = 0; \
                } \
                i->nr_segs -= bvec - i->bvec; \
                i->bvec = bvec; \
        } else { \
                const struct iovec *iov; \
                struct iovec v; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
                if (skip == iov->iov_len) { \
                        iov++; \
                        skip = 0; \
                } \
                i->nr_segs -= iov - i->iov; \
                i->iov = iov; \
        } \
        i->count -= n; \
        i->iov_offset = skip; \
}

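/*
 * Usage sketch (illustrative): callers supply one STEP expression per
 * segment flavour.  iov_iter_alignment() below, for example, just ORs
 * together the base and length of every segment:
 *
 *	iterate_all_kinds(i, size, v,
 *		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
 *		res |= v.bv_offset | v.bv_len
 *	)
 */
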
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

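/*
 * Fast path: pre-fault the user buffer and copy out of an atomic kmap;
 * if the copy faults anyway, fall back to the sleeping kmap() below.
 */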
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

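/*
 * Mirror image of copy_page_to_iter_iovec(): the same atomic-kmap fast
 * path with a kmap() fallback, but copying from userspace into the page.
 */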
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __clear_user(buf, copy);
        copy -= left;
        skip += copy;
        bytes -= copy;

        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __clear_user(buf, copy);
                copy -= left;
                skip = copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & ITER_BVEC)) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS))
                direction |= ITER_KVEC;
        i->type = direction;
        i->iov = iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

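/*
 * Minimal usage sketch for iov_iter_init() and copy_to_iter()
 * (illustrative only; 'ubuf', 'len' and 'kbuf' are hypothetical caller
 * state, not part of this file).  A read-style transfer to userspace
 * might look like:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */
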
static ssize_t get_pages_iovec(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        if (len > maxpages * PAGE_SIZE)
                len = maxpages * PAGE_SIZE;
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
        if (unlikely(res < 0))
                return res;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        void *p;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        if (!p)
                return -ENOMEM;

        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
        if (unlikely(res < 0)) {
                kvfree(p);
                return res;
        }
        *pages = p;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

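/*
 * Count the pages spanned by the first i->count bytes, capped at
 * maxpages.  An iovec segment may start and end mid-page, so each
 * segment is rounded out to page boundaries before counting.
 */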
static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct iovec *iov = i->iov;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, iov++) {
                unsigned long addr = (unsigned long)iov->iov_base + offset;
                size_t len = iov->iov_len - offset;
                offset = 0;
                if (unlikely(!len))	/* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
                        - addr / PAGE_SIZE;
                if (npages >= maxpages)	/* don't bother going further */
                        return maxpages;
                size -= len;
        }
        return min(npages, maxpages);
}

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

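/*
 * Unlike the iovec paths above, the bvec variants below copy through
 * temporary kernel mappings that cannot fault, so there is no
 * short-copy handling: the full clamped length is always consumed.
 */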
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
        skip += copy;
        from += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;

        copy = min(bytes, bvec->bv_len - skip);

        memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

        to += copy;
        skip += copy;
        bytes -= copy;

        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memzero_page(bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static ssize_t get_pages_bvec(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        /* can't be more than PAGE_SIZE */
        *start = bvec->bv_offset + i->iov_offset;

        get_page(*pages = bvec->bv_page);

        return len;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!*pages)
                return -ENOMEM;

        get_page(**pages = bvec->bv_page);

        return len;
}

static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct bio_vec *bvec = i->bvec;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, bvec++) {
                size_t len = bvec->bv_len - offset;
                offset = 0;
                if (unlikely(!len))	/* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages++;
                if (npages >= maxpages)	/* don't bother going further */
                        return maxpages;
                size -= len;
        }
        return min(npages, maxpages);
}

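/*
 * The exported entry points below simply dispatch on the iterator
 * flavour recorded in i->type.
 */
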
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_to_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_from_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_to_iter_bvec(addr, bytes, i);
        else
                return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_from_iter_bvec(addr, bytes, i);
        else
                return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return zero_bvec(bytes, i);
        else
                return zero_iovec(bytes, i);
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

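/*
 * With both STEP expressions constant 0, iterate_and_advance() copies
 * nothing and merely walks the segments, updating the iov/bvec cursor,
 * nr_segs, count and iov_offset.
 */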
void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

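/*
 * On success the return value is the number of bytes covered by the
 * pinned pages and *start is the offset of the data within the first
 * page; the caller is responsible for releasing the page references.
 */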
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_bvec(i, pages, maxsize, maxpages, start);
        else
                return get_pages_iovec(i, pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_alloc_bvec(i, pages, maxsize, start);
        else
                return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        if (i->type & ITER_BVEC)
                return iov_iter_npages_bvec(i, maxpages);
        else
                return iov_iter_npages_iovec(i, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);