#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

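/*
 * Copy data from a kernel page into a user-backed (iovec) iterator.
 * The fast path copies under kmap_atomic() after prefaulting the user
 * buffer; if the copy still faults we fall back to a sleeping kmap()
 * and ordinary __copy_to_user().
 */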
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

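/*
 * Copy data from a user-backed (iovec) iterator into a kernel page;
 * the mirror image of copy_page_to_iter_iovec() above, with the same
 * atomic fast path and non-atomic fallback.
 */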
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
{
        size_t copied = 0, left = 0;

        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);

                base = 0;
                left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;

                if (unlikely(left))
                        break;
        }
        return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied before the fault.
 */
static size_t copy_from_user_atomic_iovec(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
                left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap_atomic(kaddr);

        return copied;
}

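/*
 * Advance the iterator by @bytes: consume whole segments as they are
 * exhausted and leave iov_offset pointing into the first segment that
 * still has data.
 */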
static void advance_iovec(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !iov->iov_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the iovec).
                 */
                while (bytes || unlikely(i->count && !iov->iov_len)) {
                        int copy;

                        copy = min(bytes, iov->iov_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
                                iov++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->iov = iov;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length of
 * bytes.  Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & ITER_BVEC)) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

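/*
 * Typical usage in a buffered-write path (a minimal sketch, modelled on
 * generic_perform_write() in mm/filemap.c; the surrounding error
 * handling is elided):
 *
 *      if (iov_iter_fault_in_readable(i, bytes))
 *              return -EFAULT;
 *      ...
 *      pagefault_disable();
 *      copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *      pagefault_enable();
 *      iov_iter_advance(i, copied);
 */
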
static unsigned long alignment_iovec(const struct iov_iter *i)
{
        const struct iovec *iov = i->iov;
        unsigned long res;
        size_t size = i->count;
        size_t n;

        if (!size)
                return 0;

        res = (unsigned long)iov->iov_base + i->iov_offset;
        n = iov->iov_len - i->iov_offset;
        if (n >= size)
                return res | size;
        size -= n;
        res |= n;
        while (size > (++iov)->iov_len) {
                res |= (unsigned long)iov->iov_base | iov->iov_len;
                size -= iov->iov_len;
        }
        res |= (unsigned long)iov->iov_base | size;
        return res;
}

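/*
 * Initialize an iovec-backed iterator.  direction is READ when the
 * iterator is the destination of a copy (data flows to user space) and
 * WRITE when it is the source.  A minimal sketch of a caller:
 *
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, WRITE, iov, nr_segs, count);
 *      copied = copy_page_from_iter(page, offset, bytes, &iter);
 */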
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS))
                direction |= ITER_KVEC;
        i->type = direction;
        i->iov = iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

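/*
 * Pin the user pages backing (at most) the first segment of the
 * iterator.  On success the byte offset into the first page is stored
 * in *start and the number of usable bytes is returned; the caller
 * owns a reference on each returned page.
 */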
static ssize_t get_pages_iovec(struct iov_iter *i,
                   struct page **pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
        if (unlikely(res < 0))
                return res;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

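/*
 * Like get_pages_iovec(), but allocates the page array itself (kmalloc
 * with a vmalloc fallback); on success the caller frees it with
 * kvfree().
 */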
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        void *p;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        if (!p)
                return -ENOMEM;

        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
        if (unlikely(res < 0)) {
                kvfree(p);
                return res;
        }
        *pages = p;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

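/*
 * Count the pages spanned by the first i->count bytes of the iterator,
 * capped at maxpages.
 */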
static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct iovec *iov = i->iov;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, iov++) {
                unsigned long addr = (unsigned long)iov->iov_base + offset;
                size_t len = iov->iov_len - offset;
                offset = 0;
                if (unlikely(!len))     /* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
                        - addr / PAGE_SIZE;
                if (npages >= maxpages) /* don't bother going further */
                        return maxpages;
                size -= len;
                offset = 0;
        }
        return min(npages, maxpages);
}

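/*
 * Helpers for the bvec flavour below: both ends of the copy are kernel
 * pages, so a plain memcpy() under kmap_atomic() is all that is needed.
 */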
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

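/*
 * The bvec counterparts of the iovec routines above.  Page-to-page
 * copies cannot fault, so there is no fallback path and no short-copy
 * handling here.
 */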
static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        kaddr = kmap_atomic(page);
        from = kaddr + offset;
        memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
        skip += copy;
        from += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap_atomic(kaddr);
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;

        kaddr = kmap_atomic(page);
        to = kaddr + offset;
        copy = min(bytes, bvec->bv_len - skip);
        memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
        to += copy;
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap_atomic(kaddr);
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted;
}

static size_t copy_from_user_bvec(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t left;
        const struct bio_vec *bvec;
        size_t base = i->iov_offset;

        kaddr = kmap_atomic(page);
        for (left = bytes, bvec = i->bvec; left; bvec++, base = 0) {
                size_t copy = min(left, bvec->bv_len - base);
                if (!bvec->bv_len)
                        continue;
                memcpy_from_page(kaddr + offset, bvec->bv_page,
                                 bvec->bv_offset + base, copy);
                offset += copy;
                left -= copy;
        }
        kunmap_atomic(kaddr);
        return bytes;
}

static void advance_bvec(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct bio_vec *bvec = i->bvec;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !bvec->bv_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the bvec).
                 */
                while (bytes || unlikely(i->count && !bvec->bv_len)) {
                        int copy;

                        copy = min(bytes, bvec->bv_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (bvec->bv_len == base) {
                                bvec++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->bvec = bvec;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}

static unsigned long alignment_bvec(const struct iov_iter *i)
{
        const struct bio_vec *bvec = i->bvec;
        unsigned long res;
        size_t size = i->count;
        size_t n;

        if (!size)
                return 0;

        res = bvec->bv_offset + i->iov_offset;
        n = bvec->bv_len - i->iov_offset;
        if (n >= size)
                return res | size;
        size -= n;
        res |= n;
        while (size > (++bvec)->bv_len) {
                res |= bvec->bv_offset | bvec->bv_len;
                size -= bvec->bv_len;
        }
        res |= bvec->bv_offset | size;
        return res;
}

static ssize_t get_pages_bvec(struct iov_iter *i,
                   struct page **pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        get_page(*pages = bvec->bv_page);

        return len;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!*pages)
                return -ENOMEM;

        get_page(**pages = bvec->bv_page);

        return len;
}

static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct bio_vec *bvec = i->bvec;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, bvec++) {
                size_t len = bvec->bv_len - offset;
                offset = 0;
                if (unlikely(!len))     /* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages++;
                if (npages >= maxpages) /* don't bother going further */
                        return maxpages;
                size -= len;
                offset = 0;
        }
        return min(npages, maxpages);
}

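/*
 * Public entry points.  Each one dispatches on ITER_BVEC to the
 * matching iovec or bvec implementation above.
 */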
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_to_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_from_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        if (i->type & ITER_BVEC)
                return copy_from_user_bvec(page, i, offset, bytes);
        else
                return copy_from_user_atomic_iovec(page, i, offset, bytes);
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (i->type & ITER_BVEC)
                advance_bvec(i, size);
        else
                advance_iovec(i, size);
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

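/*
 * Report the worst-case misalignment of the iterator: the bitwise OR
 * of every segment's base address (or page offset) and length.
 * Callers such as the direct-I/O code test the low bits of the result
 * against their block-size mask.
 */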
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return alignment_bvec(i);
        else
                return alignment_iovec(i);
}
EXPORT_SYMBOL(iov_iter_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_bvec(i, pages, maxsize, start);
        else
                return get_pages_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_alloc_bvec(i, pages, maxsize, start);
        else
                return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        if (i->type & ITER_BVEC)
                return iov_iter_npages_bvec(i, maxpages);
        else
                return iov_iter_npages_iovec(i, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);