/*
 * linux/mm/filemap.h
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

#ifndef __FILEMAP_H
#define __FILEMAP_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
					const struct iovec *iov,
					size_t base,
					size_t bytes);

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied. If a fault is encountered then clear the page
 * out to (offset+bytes) and return the number of bytes which were copied.
 *
 * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
 * to *NOT* zero any tail of the buffer that it failed to copy. If it does,
 * and if the following non-atomic copy succeeds, then there is a small window
 * where the target page contains neither the data before the write, nor the
 * data after the write (it contains zero). A read at this time will see
 * data that is inconsistent with any ordering of the read and the write.
 * (This has been detected in practice).
 */
static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset,
			const char __user *buf, unsigned bytes)
{
	char *kaddr;
	int left;

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left != 0) {
		/* Do it the slow way */
		kaddr = kmap(page);
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;
}

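/*
 * Illustrative sketch only (an assumed caller pattern, not part of the
 * original header): a buffered-write path that has already locked and
 * prepared the target pagecache page might drive filemap_copy_from_user()
 * roughly like this. A short return value (less than "bytes") means the
 * user buffer faulted even in the non-atomic fallback, and the caller is
 * expected to back out or retry with a smaller range.
 */
static inline size_t
filemap_copy_single_sketch(struct page *page, unsigned long offset,
			const char __user *buf, unsigned bytes)
{
	size_t copied;

	copied = filemap_copy_from_user(page, offset, buf, bytes);
	/* keep virtually indexed d-caches coherent after writing the page */
	flush_dcache_page(page);
	return copied;
}
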
/*
 * This has the same side effects and return value as filemap_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
 * single-segment behaviour.
 */
static inline size_t
filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
			const struct iovec *iov, size_t base, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page, KM_USER0);
	copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
							 base, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	if (copied != bytes) {
		kaddr = kmap(page);
		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
								 base, bytes);
		if (bytes - copied)
			memset(kaddr + offset + copied, 0, bytes - copied);
		kunmap(page);
	}
	return copied;
}

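/*
 * Illustrative sketch only (assumption, not part of the original header):
 * a write path that handles both single-segment and multi-segment user
 * buffers might choose between the two copy helpers like this. "nr_segs",
 * "cur_iov" and "iov_base" stand in for whatever cursor state the caller
 * keeps.
 */
static inline size_t
filemap_copy_pick_sketch(struct page *page, unsigned long offset,
			const struct iovec *cur_iov, size_t iov_base,
			size_t bytes, unsigned long nr_segs)
{
	if (likely(nr_segs == 1))
		/* fast path: copy straight from the single user buffer */
		return filemap_copy_from_user(page, offset,
				cur_iov->iov_base + iov_base, bytes);
	/* general case: walk the iovec with single-segment semantics */
	return filemap_copy_from_user_iovec(page, offset, cur_iov,
				iov_base, bytes);
}
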
static inline void
filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t base = *basep;

	do {
		int copy = min(bytes, iov->iov_len - base);

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			base = 0;
		}
	} while (bytes);
	*iovp = iov;
	*basep = base;
}
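
/*
 * Illustrative sketch only (an assumed caller pattern): copy "bytes" from a
 * multi-segment user iovec into a pagecache page, then advance the caller's
 * (*iovp, *basep) cursor past whatever was actually copied so that the next
 * call resumes in the right segment at the right offset.
 */
static inline size_t
filemap_copy_and_advance_sketch(struct page *page, unsigned long offset,
			const struct iovec **iovp, size_t *basep,
			size_t bytes)
{
	size_t copied;

	copied = filemap_copy_from_user_iovec(page, offset, *iovp, *basep,
					      bytes);
	flush_dcache_page(page);
	if (copied)
		filemap_set_next_iovec(iovp, basep, copied);
	return copied;
}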
#endif