#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

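/*
 * Walk the iovec array of the iterator one contiguous chunk at a time.
 * For each chunk __v is set up to describe it and STEP is evaluated;
 * STEP yields the number of bytes it failed to process (e.g. the return
 * value of __copy_to_user()), so a short copy terminates the walk early.
 * On exit n holds the number of bytes actually processed and skip the
 * offset into the current iovec.
 */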
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

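/*
 * The same walk for a bio_vec-backed iterator.  The backing pages are
 * kernel pages, so STEP cannot fail; its result is ignored and the walk
 * always consumes all n bytes.
 */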
#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->bvec;					\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset + skip;	\
		(void)(STEP);				\
		skip += __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))		\
			continue;			\
		__v.bv_page = __p->bv_page;		\
		__v.bv_offset = __p->bv_offset;		\
		(void)(STEP);				\
		skip = __v.bv_len;			\
		n -= __v.bv_len;			\
	}						\
	n = wanted;					\
}

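/*
 * Dispatch on the iterator flavour: evaluate I for every iovec chunk of
 * a user-backed iterator, or B for every bio_vec chunk of an ITER_BVEC
 * one.  The iterator itself is not modified; only the caller's n ends up
 * reflecting how much was processed.
 */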
#define iterate_all_kinds(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

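/*
 * Like iterate_all_kinds(), but also advances the iterator past the
 * bytes that were processed: iov/bvec, nr_segs, iov_offset and count are
 * updated so the iterator ends up positioned just after the data.
 */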
#define iterate_and_advance(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}

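/*
 * Open-coded copy to/from a user-backed (iovec) iterator.  The segments
 * are copied with __copy_to_user()/__copy_from_user(); a fault stops the
 * loop, only the bytes actually transferred are accounted for, the
 * iterator is advanced past them and their number is returned.
 */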
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

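/*
 * Copy between a kernel page and a user-backed iterator.  The fast path
 * faults in the first user segment and copies under kmap_atomic(); if
 * the in-atomic copy still comes up short (only the first segment was
 * faulted in), it falls back to a sleeping kmap() and a plain user copy.
 * Returns the number of bytes copied, which may be less than requested
 * if a user address turns out to be unmapped.
 */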
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & ITER_BVEC)) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

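/*
 * Initialize an iovec-backed iterator.  "direction" is READ or WRITE,
 * matching the direction of the I/O being set up; callers running under
 * set_fs(KERNEL_DS) get the iterator tagged ITER_KVEC as well, since
 * their "user" pointers are really kernel addresses.  Illustrative use
 * (variable names assumed, not taken from this file):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */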
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= ITER_KVEC;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

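/* kmap_atomic() based helpers for copying to/from and zeroing one page */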
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

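/*
 * Copy to/from a bio_vec-backed iterator.  The backing pages are kernel
 * pages, so nothing can fault: the full amount is always transferred and
 * the iterator is advanced past it.
 */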
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;

	copy = min(bytes, bvec->bv_len - skip);

	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

	to += copy;
	skip += copy;
	bytes -= copy;

	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
	kunmap_atomic(kaddr);
	return wanted;
}

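/*
 * The exported entry points below just dispatch on the iterator flavour:
 * ITER_BVEC iterators go through the memcpy-based bvec helpers above,
 * everything else through the user-copy based iovec ones.
 */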
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_to_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_from_iter_bvec(page, offset, bytes, i);
	else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_to_iter_bvec(addr, bytes, i);
	else
		return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_from_iter_bvec(addr, bytes, i);
	else
		return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

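/*
 * Zero the next "bytes" bytes of the iterator and advance past them,
 * using __clear_user() for user segments and memset() for bvec pages.
 * Returns the number of bytes actually zeroed, which may be short if
 * __clear_user() faults.
 */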
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

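/*
 * Copy from the iterator into a kernel page without sleeping: the page
 * is mapped with kmap_atomic() and user data is pulled in with
 * __copy_from_user_inatomic().  The iterator itself is not advanced;
 * callers typically check how much was copied and then call
 * iov_iter_advance() themselves.
 */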
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

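/*
 * OR together the base addresses/offsets and lengths of all remaining
 * segments, so a caller can test the alignment of the whole iterator
 * against a single mask (e.g. for direct I/O restrictions).
 */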
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

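/*
 * Pin the pages backing the first segment of the iterator, up to maxsize
 * bytes and maxpages pages, into the caller-supplied "pages" array.
 * *start is set to the offset of the data within the first page.  Returns
 * the number of bytes covered by the pinned pages or a negative errno;
 * for a bvec-backed iterator the existing page is simply grabbed with
 * get_page().  The _alloc variant further down also allocates the array.
 */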
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

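/*
 * Allocate a page-pointer array with kmalloc(), falling back to
 * vmalloc() for large requests; the result is freed with kvfree().
 */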
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

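/*
 * Return the number of pages spanned by the remaining data, capped at
 * maxpages.  Typically used to size page arrays before calling the
 * iov_iter_get_pages*() helpers.
 */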
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);