#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

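/*
 * Walk the user-space iovec segments of *i, presenting at most n bytes as
 * successive chunks in __v, starting skip bytes into the first segment.
 * STEP is evaluated once per chunk and must yield the number of bytes it
 * failed to process; a short step ends the walk early.  On exit n holds
 * the number of bytes actually handled.
 */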
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

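/*
 * Same walk over kernel-space kvec segments.  Steps on kernel memory cannot
 * fault partway through, so STEP's value is ignored and all n bytes are
 * consumed.
 */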
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

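/*
 * Same walk over bio_vec segments, using a local bvec_iter so that
 * for_each_bvec() does the page/offset/length bookkeeping.
 */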
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

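/*
 * Dispatch on the iterator flavour and run the matching chunk expression:
 * I for user-space iovecs, B for bio_vecs, K for kvecs.  The iterator itself
 * is left unadvanced.
 */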
#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		struct bio_vec v;			\
		struct bvec_iter __bi;			\
		iterate_bvec(i, n, v, __bi, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}

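/*
 * Like iterate_all_kinds(), but also advance the iterator past whatever got
 * processed: the segment pointer, nr_segs, iov_offset and count are updated
 * to match.
 */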
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

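/*
 * Copy data from a page into the user-space iovecs of *i.  On HIGHMEM
 * configurations the destination is prefaulted and the copy is first tried
 * under kmap_atomic(); if it still comes up short, fall back to a sleeping
 * kmap().  Without HIGHMEM, kmap() is cheap and the atomic attempt is
 * skipped entirely.
 */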
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

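/*
 * The mirror image of copy_page_to_iter_iovec(): copy from user-space iovecs
 * into a page, with the same atomic-first strategy on HIGHMEM.
 */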
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

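/* kmap_atomic()-based helpers for copying to/from or zeroing part of a page */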
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

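/*
 * Copy a kernel buffer into the iterator and advance it.  The return value
 * may be short if a user-space destination faults partway through.
 */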
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

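/*
 * Copy from the iterator into a page without advancing the iterator;
 * typically called with pagefaults disabled, hence the _inatomic user copy.
 * A fault on a user-space source shortens the copy.
 */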
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

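/*
 * OR together the base address and length of every remaining segment, so a
 * caller can test the whole iterator against an alignment mask in one go.
 */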
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

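/*
 * Pin the pages backing the first segment (up to maxsize bytes and maxpages
 * pages) into *pages and return the number of bytes covered; *start gets the
 * offset into the first page.  kvec-backed iterators are rejected with
 * -EFAULT.
 */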
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

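/*
 * Copy from the iterator and fold the copied data into *csum in a single
 * pass, advancing the iterator as we go.
 */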
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

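/*
 * Estimate how many pages the remaining data spans, capped at maxpages.
 */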
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

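/*
 * Duplicate an iterator, including a kmemdup()'d copy of its segment array.
 * Returns the new array (NULL on allocation failure) for the caller to free.
 */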
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

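/*
 * Validate a user-space iovec array and set *i up over it.  On success *iov
 * points to a buffer the caller must kfree() (or NULL if the caller-supplied
 * fast_segs array was used); on failure everything is cleaned up here and an
 * error is returned.
 */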
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);