// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
                                          int num_pages, bool write_page)
{
        struct page **pages;
        int got = 0;
        int rc = 0;

        pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        while (got < num_pages) {
                rc = get_user_pages_fast(
                    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
                    num_pages - got, write_page, pages + got);
                if (rc < 0)
                        break;
                BUG_ON(rc == 0);
                got += rc;
        }
        if (rc < 0)
                goto fail;
        return pages;

fail:
        ceph_put_page_vector(pages, got, false);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);

void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
        int i;

        for (i = 0; i < num_pages; i++) {
                if (dirty)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }
        kvfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
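
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller that pins a user buffer for direct I/O and drops the pages
 * afterwards, marking them dirty when data was written into them.
 * "buf" and "num_pages" are placeholder names.
 *
 *      pages = ceph_get_direct_page_vector(buf, num_pages, true);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      // ... perform the I/O against the pinned pages ...
 *      ceph_put_page_vector(pages, num_pages, true);
 */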

void ceph_release_page_vector(struct page **pages, int num_pages)
{
        int i;

        for (i = 0; i < num_pages; i++)
                __free_pages(pages[i], 0);
        kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
        struct page **pages;
        int i;

        pages = kmalloc(sizeof(*pages) * num_pages, flags);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < num_pages; i++) {
                pages[i] = __page_cache_alloc(flags);
                if (pages[i] == NULL) {
                        ceph_release_page_vector(pages, i);
                        return ERR_PTR(-ENOMEM);
                }
        }
        return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
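
/*
 * Illustrative sketch (not part of the original file): allocating a
 * vector of kernel pages for a bulk buffer and releasing it again with
 * ceph_release_page_vector().  "num_pages" is a placeholder.
 *
 *      pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      // ... fill or map the pages ...
 *      ceph_release_page_vector(pages, num_pages);
 */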

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
                                  const void __user *data,
                                  loff_t off, size_t len)
{
        int i = 0;
        int po = off & ~PAGE_MASK;
        int left = len;
        int l, bad;

        while (left > 0) {
                l = min_t(int, PAGE_SIZE-po, left);
                bad = copy_from_user(page_address(pages[i]) + po, data, l);
                if (bad == l)
                        return -EFAULT;
                data += l - bad;
                left -= l - bad;
                po += l - bad;
                if (po == PAGE_SIZE) {
                        po = 0;
                        i++;
                }
        }
        return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
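
/*
 * Illustrative sketch (not part of the original file): copying "len"
 * bytes from a user buffer into a freshly allocated page vector,
 * starting at byte offset 0.  All names are placeholders and error
 * handling is abbreviated.
 *
 *      pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      ret = ceph_copy_user_to_page_vector(pages, ubuf, 0, len);
 *      if (ret < 0)
 *              goto out_release;
 */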

void ceph_copy_to_page_vector(struct page **pages,
                              const void *data,
                              loff_t off, size_t len)
{
        int i = 0;
        size_t po = off & ~PAGE_MASK;
        size_t left = len;

        while (left > 0) {
                size_t l = min_t(size_t, PAGE_SIZE-po, left);

                memcpy(page_address(pages[i]) + po, data, l);
                data += l;
                left -= l;
                po += l;
                if (po == PAGE_SIZE) {
                        po = 0;
                        i++;
                }
        }
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

void ceph_copy_from_page_vector(struct page **pages,
                                void *data,
                                loff_t off, size_t len)
{
        int i = 0;
        size_t po = off & ~PAGE_MASK;
        size_t left = len;

        while (left > 0) {
                size_t l = min_t(size_t, PAGE_SIZE-po, left);

                memcpy(data, page_address(pages[i]) + po, l);
                data += l;
                left -= l;
                po += l;
                if (po == PAGE_SIZE) {
                        po = 0;
                        i++;
                }
        }
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
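
/*
 * Illustrative sketch (not part of the original file): round-tripping a
 * kernel buffer through a page vector with the memcpy-based helpers.
 * "buf", "len" and "pages" are placeholders.
 *
 *      ceph_copy_to_page_vector(pages, buf, 0, len);
 *      // ... hand the pages to the messenger / OSD request ...
 *      ceph_copy_from_page_vector(pages, buf, 0, len);
 */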

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
        int i = off >> PAGE_SHIFT;

        off &= ~PAGE_MASK;

        dout("zero_page_vector_page %u~%u\n", off, len);

        /* leading partial page? */
        if (off) {
                int end = min((int)PAGE_SIZE, off + len);
                dout("zeroing %d %p head from %d\n", i, pages[i],
                     (int)off);
                zero_user_segment(pages[i], off, end);
                len -= (end - off);
                i++;
        }
        while (len >= PAGE_SIZE) {
                dout("zeroing %d %p len=%d\n", i, pages[i], len);
                zero_user_segment(pages[i], 0, PAGE_SIZE);
                len -= PAGE_SIZE;
                i++;
        }
        /* trailing partial page? */
        if (len) {
                dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
                zero_user_segment(pages[i], 0, len);
        }
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
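
/*
 * Illustrative sketch (not part of the original file): zeroing the tail
 * of a short read so the rest of the buffer is not left with stale
 * data.  "page_off" (offset of the data within the first page), "ret"
 * (bytes actually read) and "len" are placeholder names.
 *
 *      if (ret < len)
 *              ceph_zero_page_vector_range(page_off + ret, len - ret,
 *                                          pages);
 */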