/*
 *	iovec manipulation routines.
 *
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *		Andrew Lunn	:	Errors in iovec copying.
 *		Pedro Roque	:	Added memcpy_fromiovecend and
 *					csum_..._fromiovecend.
 *		Andi Kleen	:	fixed error handling for 2.1
 *		Alexey Kuznetsov:	2.1 optimisations
 *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>

/*
 *	Verify iovec. The caller must ensure that the iovec is big enough
 *	to hold the message iovec.
 *
 *	Save time not doing access_ok. copy_*_user will make this work
 *	in any case.
 */

int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
{
	int size, ct, err;

	if (m->msg_namelen) {
		if (mode == VERIFY_READ) {
			void __user *namep;
			namep = (void __user __force *) m->msg_name;
			err = move_addr_to_kernel(namep, m->msg_namelen,
						  address);
			if (err < 0)
				return err;
		}
		m->msg_name = address;
	} else {
		m->msg_name = NULL;
	}

	size = m->msg_iovlen * sizeof(struct iovec);
	if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
		return -EFAULT;

	m->msg_iov = iov;
	err = 0;

	/*
	 * Total up the segment lengths, clamping so that the signed
	 * return value cannot exceed INT_MAX.
	 */
	for (ct = 0; ct < m->msg_iovlen; ct++) {
		size_t len = iov[ct].iov_len;

		if (len > INT_MAX - err) {
			len = INT_MAX - err;
			iov[ct].iov_len = len;
		}
		err += len;
	}

	return err;
}
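
/*
 * Illustrative usage sketch (not part of the original file): roughly how a
 * sendmsg()-style caller might use verify_iovec().  The surrounding code,
 * variable names and error handling are hypothetical; a real caller must
 * also ensure iovstack has room for msg->msg_iovlen entries.
 *
 *	struct sockaddr_storage address;
 *	struct iovec iovstack[UIO_FASTIOV];
 *	int total_len;
 *
 *	total_len = verify_iovec(msg, iovstack, &address, VERIFY_READ);
 *	if (total_len < 0)
 *		return total_len;
 *	// total_len is the summed iovec length, clamped to INT_MAX, and
 *	// msg->msg_iov now points at the kernel copy in iovstack.
 */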

/*
 *	Copy kernel to iovec. Returns -EFAULT on error.
 */

int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
		      int offset, int len)
{
	int copy;
	for (; len > 0; ++iov) {
		/* Skip over the finished iovecs */
		if (unlikely(offset >= iov->iov_len)) {
			offset -= iov->iov_len;
			continue;
		}
		copy = min_t(unsigned int, iov->iov_len - offset, len);
		if (copy_to_user(iov->iov_base + offset, kdata, copy))
			return -EFAULT;
		offset = 0;
		kdata += copy;
		len -= copy;
	}

	return 0;
}
EXPORT_SYMBOL(memcpy_toiovecend);
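
/*
 * Illustrative sketch (not from the original file): the 'offset' argument of
 * memcpy_toiovecend() is consumed from the iovec side, so a caller can lay a
 * kernel header and payload out back to back in user memory without touching
 * the iovec itself.  'hdr', 'payload' and 'payload_len' are hypothetical.
 *
 *	u8 hdr[8];
 *
 *	if (memcpy_toiovecend(iov, hdr, 0, sizeof(hdr)))
 *		return -EFAULT;
 *	if (memcpy_toiovecend(iov, payload, sizeof(hdr), payload_len))
 *		return -EFAULT;
 */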

/*
 *	Copy from the user iovec into a kernel buffer. Returns -EFAULT on error.
 */

int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
			int offset, int len)
{
	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;
		if (copy_from_user(kdata, base, copy))
			return -EFAULT;
		len -= copy;
		kdata += copy;
		iov++;
	}

	return 0;
}
EXPORT_SYMBOL(memcpy_fromiovecend);
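
/*
 * Illustrative sketch (not from the original file): memcpy_fromiovecend() is
 * the mirror of the routine above, gathering user data into a linear kernel
 * buffer.  'kbuf', 'hdr_len' and 'body_len' are hypothetical.
 *
 *	if (memcpy_fromiovecend(kbuf, iov, hdr_len, body_len))
 *		return -EFAULT;
 *	// kbuf now holds body_len bytes taken from the iovec, starting
 *	// hdr_len bytes in; the iovec itself is left unmodified.
 */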

/*
 *	And now for the all-in-one: copy and checksum from a user iovec
 *	directly to a datagram.
 *	All calls to csum_partial are made on 32 bit chunks; only the last
 *	call may be unaligned.
 *
 *	ip_build_xmit must ensure that when fragmenting, only the last
 *	call to this function will be unaligned.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				   int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				*csump = csum_partial(kdata - partial_cnt,
						      partial_cnt, csum);
				goto out;
			}
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		if (len > copy) {
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
						       csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
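
/*
 * Worked example (added for clarity, not part of the original file): suppose
 * len is 8 and the iovec holds segments of 3 and 5 bytes.  The first pass
 * copies the 3 odd bytes into kdata without checksumming them and records
 * partial_cnt = 3.  The second pass copies 1 more byte to complete the 4-byte
 * group, runs csum_partial() over those 4 bytes in the kernel buffer, then
 * handles the remaining 4 bytes with a single csum_and_copy_from_user() call.
 * This is how every csum_partial() invocation stays on 32-bit aligned chunks
 * even when iovec segment lengths are not multiples of 4.
 */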

/*
 * Return the number of pages spanned by the iovec, after skipping the first
 * 'offset' bytes.  Each segment is rounded out to page boundaries on its own,
 * so the result is an upper bound on the number of distinct pages touched.
 */
unsigned long iov_pages(const struct iovec *iov, int offset,
			unsigned long nr_segs)
{
	unsigned long seg, base;
	int pages = 0, len, size;

	while (nr_segs && (offset >= iov->iov_len)) {
		offset -= iov->iov_len;
		++iov;
		--nr_segs;
	}

	for (seg = 0; seg < nr_segs; seg++) {
		base = (unsigned long)iov[seg].iov_base + offset;
		len = iov[seg].iov_len - offset;
		/* pages needed to cover [base, base + len) */
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		pages += size;
		offset = 0;
	}

	return pages;
}
EXPORT_SYMBOL(iov_pages);