#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const char __user *data,
					  int num_pages, bool write_page)
{
	struct page **pages;
	int rc;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* pin the user pages; get_user_pages() returns the number pinned */
	down_read(&current->mm->mmap_sem);
	rc = get_user_pages(current, current->mm, (unsigned long)data,
			    num_pages, write_page, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (rc < num_pages)
		goto fail;
	return pages;

fail:
	/* drop any pages that were pinned before the failure */
	ceph_put_page_vector(pages, rc > 0 ? rc : 0, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
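
/*
 * Example usage (an illustrative sketch, not code from this file): for a
 * direct-io read into a user buffer, a caller might pin the pages and
 * later drop them roughly like this, where 'ubuf', 'len' and 'num_pages'
 * are hypothetical caller state:
 *
 *	struct page **pages;
 *	int num_pages = calc_pages_for((unsigned long)ubuf, len);
 *
 *	pages = ceph_get_direct_page_vector(ubuf, num_pages, true);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... fill the pages with the data that was read ...
 *	ceph_put_page_vector(pages, num_pages, true);
 *
 * Passing dirty == true to ceph_put_page_vector() marks the pages dirty
 * before the references taken by get_user_pages() are dropped.
 */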

void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
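
/*
 * Example usage (an illustrative sketch): allocating and releasing a
 * kernel page vector large enough for 'len' bytes, where 'len' and
 * 'num_pages' are hypothetical caller state:
 *
 *	struct page **pages;
 *	int num_pages = calc_pages_for(0, len);
 *
 *	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... use the pages ...
 *	ceph_release_page_vector(pages, num_pages);
 *
 * Note that ceph_release_page_vector() frees the pages themselves as
 * well as the vector, so it pairs with ceph_alloc_page_vector(), while
 * ceph_put_page_vector() pairs with ceph_get_direct_page_vector().
 */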

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const char __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		/* copy_from_user() returns the number of bytes not copied */
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
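
/*
 * Example usage (an illustrative sketch): copying a user buffer into a
 * freshly allocated page vector, where 'ubuf' and 'len' are hypothetical
 * caller state:
 *
 *	struct page **pages;
 *	int num_pages = calc_pages_for(0, len);
 *	int ret;
 *
 *	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	ret = ceph_copy_user_to_page_vector(pages, ubuf, 0, len);
 *	if (ret < 0) {
 *		ceph_release_page_vector(pages, num_pages);
 *		return ret;
 *	}
 *
 * On success the helper returns 'len'; if the user buffer faults it
 * returns -EFAULT.
 */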

int ceph_copy_to_page_vector(struct page **pages,
			     const char *data,
			     loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

int ceph_copy_from_page_vector(struct page **pages,
			       char *data,
			       loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;
	size_t l;

	while (left > 0) {
		l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);

/*
 * copy data from a page vector into a user pointer
 */
int ceph_copy_page_vector_to_user(struct page **pages,
				  char __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		/* copy_to_user() returns the number of bytes not copied */
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
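
/*
 * Example usage (an illustrative sketch): copying 'len' bytes starting
 * at offset 'off' out to a user buffer 'ubuf' (all three names are
 * hypothetical caller state):
 *
 *	int ret;
 *
 *	ret = ceph_copy_page_vector_to_user(pages, ubuf, off, len);
 *	if (ret < 0)
 *		return ret;	(ret is -EFAULT if the copy faulted)
 *
 * Note that the copy helpers above always start at the first page of
 * the vector; only the in-page bits of 'off' are used.
 */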

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
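
/*
 * Example usage (an illustrative sketch): zeroing the tail of a page
 * vector after a short read, where 'read' is the number of bytes
 * actually returned and 'len' the number requested (both hypothetical
 * caller state):
 *
 *	if (read < len)
 *		ceph_zero_page_vector_range(read, len - read, pages);
 *
 * The offset is relative to the start of the first page, so the helper
 * handles a partial leading page, any number of whole pages, and a
 * partial trailing page.
 */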