blob: 1a7c9a79a53c22e8e61e3b6e8e1720db71d9f9a9 [file] [log] [blame]
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -07001#include <linux/ceph/ceph_debug.h>
2
3#include <linux/module.h>
4#include <linux/sched.h>
5#include <linux/slab.h>
6#include <linux/file.h>
7#include <linux/namei.h>
8#include <linux/writeback.h>
9
10#include <linux/ceph/libceph.h>
11
12/*
13 * build a vector of user pages
14 */
Alex Elderb3248142013-02-06 13:11:38 -060015struct page **ceph_get_direct_page_vector(const void __user *data,
Henry C Changb6aa5902010-12-15 20:45:41 -080016 int num_pages, bool write_page)
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070017{
18 struct page **pages;
Sage Weil38815b72011-03-02 16:55:21 -080019 int got = 0;
20 int rc = 0;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070021
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
23 if (!pages)
24 return ERR_PTR(-ENOMEM);
25
Sage Weil38815b72011-03-02 16:55:21 -080026 while (got < num_pages) {
Dave Hansend4edcf02016-02-12 13:01:56 -080027 rc = get_user_pages_unlocked(
Sage Weil38815b72011-03-02 16:55:21 -080028 (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
Lorenzo Stoakesc1641542016-10-13 01:20:13 +010029 num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
Sage Weil38815b72011-03-02 16:55:21 -080030 if (rc < 0)
31 break;
32 BUG_ON(rc == 0);
33 got += rc;
34 }
Sage Weil38815b72011-03-02 16:55:21 -080035 if (rc < 0)
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070036 goto fail;
37 return pages;
38
39fail:
Sage Weil38815b72011-03-02 16:55:21 -080040 ceph_put_page_vector(pages, got, false);
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070041 return ERR_PTR(rc);
42}
43EXPORT_SYMBOL(ceph_get_direct_page_vector);
44
Henry C Changb6aa5902010-12-15 20:45:41 -080045void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070046{
47 int i;
48
Henry C Changb6aa5902010-12-15 20:45:41 -080049 for (i = 0; i < num_pages; i++) {
50 if (dirty)
51 set_page_dirty_lock(pages[i]);
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070052 put_page(pages[i]);
Henry C Changb6aa5902010-12-15 20:45:41 -080053 }
Ilya Dryomovb01da6a2015-05-04 14:10:11 +030054 kvfree(pages);
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070055}
56EXPORT_SYMBOL(ceph_put_page_vector);
57
/*
 * Free every page in the vector (as allocated by
 * ceph_alloc_page_vector()), then the vector itself.
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int idx = 0;

	while (idx < num_pages) {
		__free_pages(pages[idx], 0);
		idx++;
	}
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
67
68/*
69 * allocate a vector new pages
70 */
71struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
72{
73 struct page **pages;
74 int i;
75
76 pages = kmalloc(sizeof(*pages) * num_pages, flags);
77 if (!pages)
78 return ERR_PTR(-ENOMEM);
79 for (i = 0; i < num_pages; i++) {
80 pages[i] = __page_cache_alloc(flags);
81 if (pages[i] == NULL) {
82 ceph_release_page_vector(pages, i);
83 return ERR_PTR(-ENOMEM);
84 }
85 }
86 return pages;
87}
88EXPORT_SYMBOL(ceph_alloc_page_vector);
89
90/*
91 * copy user data into a page vector
92 */
93int ceph_copy_user_to_page_vector(struct page **pages,
Alex Elderb3248142013-02-06 13:11:38 -060094 const void __user *data,
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070095 loff_t off, size_t len)
96{
97 int i = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +030098 int po = off & ~PAGE_MASK;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -070099 int left = len;
100 int l, bad;
101
102 while (left > 0) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300103 l = min_t(int, PAGE_SIZE-po, left);
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700104 bad = copy_from_user(page_address(pages[i]) + po, data, l);
105 if (bad == l)
106 return -EFAULT;
107 data += l - bad;
108 left -= l - bad;
109 po += l - bad;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300110 if (po == PAGE_SIZE) {
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700111 po = 0;
112 i++;
113 }
114 }
115 return len;
116}
117EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
118
Alex Elder903bb322013-02-06 13:11:38 -0600119void ceph_copy_to_page_vector(struct page **pages,
Alex Elderb3248142013-02-06 13:11:38 -0600120 const void *data,
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700121 loff_t off, size_t len)
122{
123 int i = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300124 size_t po = off & ~PAGE_MASK;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700125 size_t left = len;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700126
127 while (left > 0) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300128 size_t l = min_t(size_t, PAGE_SIZE-po, left);
Alex Elder903bb322013-02-06 13:11:38 -0600129
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700130 memcpy(page_address(pages[i]) + po, data, l);
131 data += l;
132 left -= l;
133 po += l;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300134 if (po == PAGE_SIZE) {
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700135 po = 0;
136 i++;
137 }
138 }
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700139}
140EXPORT_SYMBOL(ceph_copy_to_page_vector);
141
Alex Elder903bb322013-02-06 13:11:38 -0600142void ceph_copy_from_page_vector(struct page **pages,
Alex Elderb3248142013-02-06 13:11:38 -0600143 void *data,
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700144 loff_t off, size_t len)
145{
146 int i = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300147 size_t po = off & ~PAGE_MASK;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700148 size_t left = len;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700149
150 while (left > 0) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300151 size_t l = min_t(size_t, PAGE_SIZE-po, left);
Alex Elder903bb322013-02-06 13:11:38 -0600152
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700153 memcpy(data, page_address(pages[i]) + po, l);
154 data += l;
155 left -= l;
156 po += l;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300157 if (po == PAGE_SIZE) {
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700158 po = 0;
159 i++;
160 }
161 }
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700162}
163EXPORT_SYMBOL(ceph_copy_from_page_vector);
164
165/*
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700166 * Zero an extent within a page vector. Offset is relative to the
167 * start of the first page.
168 */
169void ceph_zero_page_vector_range(int off, int len, struct page **pages)
170{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300171 int i = off >> PAGE_SHIFT;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700172
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300173 off &= ~PAGE_MASK;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700174
175 dout("zero_page_vector_page %u~%u\n", off, len);
176
177 /* leading partial page? */
178 if (off) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300179 int end = min((int)PAGE_SIZE, off + len);
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700180 dout("zeroing %d %p head from %d\n", i, pages[i],
181 (int)off);
182 zero_user_segment(pages[i], off, end);
183 len -= (end - off);
184 i++;
185 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300186 while (len >= PAGE_SIZE) {
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700187 dout("zeroing %d %p len=%d\n", i, pages[i], len);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300188 zero_user_segment(pages[i], 0, PAGE_SIZE);
189 len -= PAGE_SIZE;
Yehuda Sadeh3d14c5d2010-04-06 15:14:15 -0700190 i++;
191 }
192 /* trailing partial page? */
193 if (len) {
194 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
195 zero_user_segment(pages[i], 0, len);
196 }
197}
198EXPORT_SYMBOL(ceph_zero_page_vector_range);
199