#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Copy up to @bytes bytes from @page (starting at @offset) into the
 * user buffers described by @i, advancing the iterator past whatever
 * was copied.  Returns the number of bytes copied, which may be short
 * if part of the destination could not be faulted in.
 */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
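
/*
 * Hypothetical usage sketch (not part of this file): a read-side path
 * that has just brought a page uptodate would push its contents out to
 * the caller's buffers roughly like this.  The example function and its
 * error-handling policy are illustrative, not taken from a real caller.
 */
#if 0	/* illustrative only */
static ssize_t example_read_page(struct page *page, size_t offset,
				 size_t bytes, struct iov_iter *iter)
{
	size_t copied = copy_page_to_iter(page, offset, bytes, iter);

	/* A short copy means a destination page could not be faulted in. */
	return copied ? copied : -EFAULT;
}
#endif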

/*
 * Copy up to @bytes bytes from the user buffers described by @i into
 * @page (starting at @offset), advancing the iterator past whatever was
 * copied.  Returns the number of bytes copied, which may be short if
 * part of the source could not be faulted in.
 */
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_from_iter);

/*
 * Copy from a sequence of user buffers into @vaddr without allowing
 * page faults; stops at the first partial copy and returns the number
 * of bytes actually copied.
 */
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes
 * which were successfully copied.  If a fault is encountered then
 * copying stops, and the number of bytes copied up to that point is
 * returned.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
					i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
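
/*
 * Hypothetical usage sketch (not part of this file): the buffered write
 * path pairs iov_iter_copy_from_user_atomic() with
 * iov_iter_fault_in_readable() and iov_iter_advance(), along the lines
 * of generic_perform_write() in mm/filemap.c.  A simplified outline,
 * with the example function name and retry policy invented for
 * illustration:
 */
#if 0	/* illustrative only */
static ssize_t example_fill_page(struct page *page, unsigned long offset,
				 size_t bytes, struct iov_iter *i)
{
	size_t copied;

	for (;;) {
		/* Pre-fault so the atomic copy below is likely to succeed. */
		if (iov_iter_fault_in_readable(i, bytes))
			return -EFAULT;
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		iov_iter_advance(i, copied);
		if (copied)
			return copied;
		/* Nothing copied: the source was faulted back out between
		 * the pre-fault and the copy; retry with at most one
		 * segment's worth. */
		bytes = min(bytes, iov_iter_single_seg_count(i));
	}
}
#endif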

/*
 * Consume @bytes from the front of the iterator, stepping over segment
 * boundaries as needed.
 */
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
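
/*
 * Worked example: with two segments { .iov_len = 3 } and { .iov_len = 5 }
 * and iov_offset == 0, iov_iter_advance(i, 4) consumes all of the first
 * segment plus one byte of the second, leaving i->iov pointing at the
 * second segment with i->iov_offset == 1, i->nr_segs == 1, and i->count
 * reduced by 4.
 */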

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

/*
 * OR together the base address and length of every segment the iterator
 * still covers; callers (e.g. direct I/O) test the low bits of the
 * result to check whether the whole iterator meets their alignment
 * requirements.
 */
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	unsigned long res;
	size_t size = i->count;
	size_t n;

	if (!size)
		return 0;

	res = (unsigned long)iov->iov_base + i->iov_offset;
	n = iov->iov_len - i->iov_offset;
	if (n >= size)
		return res | size;
	size -= n;
	res |= n;
	while (size > (++iov)->iov_len) {
		res |= (unsigned long)iov->iov_base | iov->iov_len;
		size -= iov->iov_len;
	}
	res |= (unsigned long)iov->iov_base | size;
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
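
/*
 * Worked example: for segments { .iov_base = (void *)0x1000, .iov_len = 0x200 }
 * and { .iov_base = (void *)0x3000, .iov_len = 0x1000 } with iov_offset == 0
 * and count == 0x600, the result is 0x1000 | 0x200 | 0x3000 | 0x400 == 0x3600.
 * The lowest set bit (0x200) tells a direct I/O caller that every piece
 * of the iterator is at least 512-byte aligned.
 */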

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
		direction |= REQ_KERNEL;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
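
/*
 * Hypothetical usage sketch (not part of this file): wrapping a single
 * user buffer for a read, in the style of the synchronous read/write
 * wrappers in fs/read_write.c.  "buf" and "len" are placeholders.
 */
#if 0	/* illustrative only */
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_init(&iter, READ, &iov, 1, len);
#endif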

/*
 * Pin the user pages backing the first segment of the iterator, up to
 * @maxsize bytes.  *@start is set to the offset of the data within the
 * first page; the return value is the number of usable bytes covered by
 * the pinned pages, or a negative errno.
 */
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
	if (unlikely(res < 0))
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages);
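
/*
 * Worked example (assuming 4K pages): with the data starting at user
 * address 0x12345678 and len == 0x2000, *start becomes 0x678 and len is
 * bumped to 0x2678, so n == 3 pages are pinned.  If all three succeed
 * the return value is 0x2678 - 0x678 == 0x2000; if only two are pinned
 * it is 2 * PAGE_SIZE - 0x678 == 0x1988.
 */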

/*
 * As iov_iter_get_pages(), but the page array is allocated here
 * (kmalloc() with a vmalloc() fallback) and handed back via *@pages;
 * the caller must release it with kvfree().
 */
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	size_t offset = i->iov_offset;
	const struct iovec *iov = i->iov;
	size_t len;
	unsigned long addr;
	void *p;
	int n;
	int res;

	len = iov->iov_len - offset;
	if (len > i->count)
		len = i->count;
	if (len > maxsize)
		len = maxsize;
	addr = (unsigned long)iov->iov_base + offset;
	len += *start = addr & (PAGE_SIZE - 1);
	addr &= ~(PAGE_SIZE - 1);
	n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

	p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	if (!p)
		return -ENOMEM;

	res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
	if (unlikely(res < 0)) {
		kvfree(p);
		return res;
	}
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

/*
 * Return the number of pages the iterator's remaining data spans,
 * capped at @maxpages.
 */
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t offset = i->iov_offset;
	size_t size = i->count;
	const struct iovec *iov = i->iov;
	int npages = 0;
	int n;

	for (n = 0; size && n < i->nr_segs; n++, iov++) {
		unsigned long addr = (unsigned long)iov->iov_base + offset;
		size_t len = iov->iov_len - offset;
		offset = 0;
		if (unlikely(!len))	/* empty segment */
			continue;
		if (len > size)
			len = size;
		npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
			  - addr / PAGE_SIZE;
		if (npages >= maxpages)	/* don't bother going further */
			return maxpages;
		size -= len;
	}
	return min(npages, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);
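
/*
 * Worked example (assuming 4K pages): a single segment of 0x100 bytes at
 * user address 0xff80 straddles a page boundary, so it contributes
 * (0x10080 + 0xfff) / 0x1000 - 0xff80 / 0x1000 == 0x11 - 0xf == 2 pages.
 */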