/*
 * Lockless get_user_pages_fast for s390
 *
 * Copyright IBM Corp. 2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

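	/*
	 * A pte may only be followed by the fast path if it is neither
	 * invalid nor special and, for a write access, not mapped
	 * read-only.  The mask collects the bits that must all be clear.
	 */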
	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
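		/*
		 * Take a speculative reference on the page.  If the pte
		 * changed while the reference was taken, drop it again
		 * and let the caller fall back to the slow path.
		 */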
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
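	/*
	 * While the compound page is intact, references to its tail
	 * pages are tracked in _mapcount rather than _count.
	 */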
	atomic_inc(&page->_mapcount);
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page, *tail;
	int refs;

	result = write ? 0 : _SEGMENT_ENTRY_RO;
	mask = result | _SEGMENT_ENTRY_INV;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

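	/*
	 * Record every page of the huge mapping that falls inside the
	 * requested range; the references themselves are taken on the
	 * head page below.
	 */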
	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

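	/*
	 * Take all references on the head page in one go.  If that
	 * fails, or if the pmd changed while the references were taken,
	 * undo the work and fall back to the slow path.
	 */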
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}


static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

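	/*
	 * s390 uses a dynamic number of page table levels.  Only if the
	 * pud is a region-third-table entry is there a separate segment
	 * (pmd) table to dereference; otherwise the upper levels are
	 * folded and pudp itself already addresses the segment table.
	 */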
	pmdp = (pmd_t *) pudp;
#ifdef CONFIG_64BIT
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
#endif
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

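	/*
	 * If the pgd entry is a region-second-table entry it points to
	 * a separate region-third (pud) table; otherwise the levels are
	 * folded and pgdp is reused as the pud pointer.
	 */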
	pudp = (pud_t *) pgdp;
#ifdef CONFIG_64BIT
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
#endif
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
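	/* Fall back to the slow path if the address range wraps around. */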
	if (end < start)
		goto slow_irqon;

	/*
	 * local_irq_disable() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;
slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
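
/*
 * Illustrative example (a sketch, not part of this file): a typical caller
 * pins a user buffer for write access, checks how many pages were actually
 * pinned, and drops the references with put_page() when it is done.
 * user_addr and the fixed count of 16 are made-up values.
 *
 *	struct page *pages[16];
 *	int i, nr;
 *
 *	nr = get_user_pages_fast(user_addr, 16, 1, pages);
 *	if (nr < 0)
 *		return nr;	(no pages could be pinned)
 *	... use pages[0..nr-1], which may be fewer than requested ...
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */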