/*
 * Lockless get_user_pages_fast for s390
 *
 * Copyright IBM Corp. 2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are split out of the page
 * table walkers, otherwise gcc inlines everything into a single
 * function, which results in too much register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

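	/*
	 * A pte may be pinned by the fast path only if it is present and
	 * not special; for a write access the protection bit must be
	 * clear as well, otherwise a copy-on-write fault is needed and
	 * we have to fall back to the slow path.
	 */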
	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

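	/*
	 * Each pte is read once into a local copy; barrier() keeps gcc
	 * from re-reading it through ptep. After the speculative page
	 * reference has been taken the pte is compared against the
	 * current value: if it changed underneath us, the reference is
	 * dropped and the fast path gives up.
	 */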
	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	struct page *head, *page;
	int refs;

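	/*
	 * The segment table entry must be present, and for a write
	 * access its protection bit must be clear; otherwise fall back
	 * to the slow path.
	 */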
	mask = (write ? _SEGMENT_ENTRY_RO : 0) | _SEGMENT_ENTRY_INV;
	if ((pmd_val(pmd) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

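	/*
	 * Take all references on the compound head in one go, then make
	 * sure the pmd did not change while the pages were counted; if
	 * it did, roll the references back and let the slow path handle
	 * the range.
	 */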
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

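	/*
	 * s390 page tables use a dynamic number of levels. If the pud
	 * entry is a region-third-table entry it points to a segment
	 * table that still has to be indexed; on 31-bit kernels the
	 * upper levels are folded and the pud entry already is the
	 * segment table entry.
	 */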
	pmdp = (pmd_t *) pudp;
#ifdef CONFIG_64BIT
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
#endif
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

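	/*
	 * Same dance one level up: a region-second-table entry points
	 * to a region-third table, otherwise the levels are folded and
	 * the pgd entry is used directly.
	 */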
	pudp = (pud_t *) pgdp;
#ifdef CONFIG_64BIT
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
#endif
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
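 *
 * A minimal usage sketch (a hypothetical caller; "uaddr" and the
 * error handling are illustrative, not part of this file):
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = get_user_pages_fast(uaddr, 1, 1, &page);
 *	if (ret < 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	... access the pinned page, e.g. via kmap() ...
 *	put_page(page);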
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		goto slow_irqon;

	/*
	 * local_irq_disable() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;
slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/*
		 * Have to be a bit careful with return values: if the
		 * fast path already pinned some pages, report those even
		 * when the slow path fails; otherwise pass the slow path
		 * result on unchanged.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}