/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

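/*
 * Take a free huge page off the free lists, preferring the local node
 * and falling back to the first node that has one queued.  Called with
 * hugetlb_lock held.
 */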
static struct page *dequeue_huge_page(void)
{
	int nid = numa_node_id();
	struct page *page = NULL;

	if (list_empty(&hugepage_freelists[nid])) {
		for (nid = 0; nid < MAX_NUMNODES; ++nid)
			if (!list_empty(&hugepage_freelists[nid]))
				break;
	}
	if (nid >= 0 && nid < MAX_NUMNODES &&
	    !list_empty(&hugepage_freelists[nid])) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

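/*
 * Allocate a fresh compound huge page from the buddy allocator,
 * round-robining the starting node so the pool is spread across
 * online nodes.
 */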
static struct page *alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = (nid + 1) % num_online_nodes();
	if (page) {
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}
	return page;
}

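/*
 * Installed in page[1].mapping by alloc_huge_page() and invoked once
 * the compound page's reference count has dropped to zero: the page
 * goes back onto the hugetlb free lists rather than to the buddy
 * allocator.
 */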
void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);
	page[1].mapping = NULL;

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

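/*
 * Hand out a huge page from the pool: take it off the free lists,
 * give it an initial reference, install free_huge_page() as its
 * destructor in page[1].mapping, and zero its contents.
 */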
struct page *alloc_huge_page(void)
{
	struct page *page;
	int i;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page();
	if (!page) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	}
	spin_unlock(&hugetlb_lock);
	set_page_count(page, 1);
	page[1].mapping = (void *)free_huge_page;
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
		clear_highpage(&page[i]);
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;
	struct page *page;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		page = alloc_fresh_huge_page();
		if (!page)
			break;
		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);
		spin_unlock(&hugetlb_lock);
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
		set_page_count(&page[i], 0);
	}
	set_page_count(page, 1);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i, nid;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			nid = page_zone(page)->zone_pgdat->node_id;
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

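/*
 * Grow or shrink the pool to 'count' pages for the sysctl.  Growing
 * allocates fresh huge pages; shrinking returns surplus free pages to
 * the buddy allocator, trying lowmem pages first on CONFIG_HIGHMEM.
 */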
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		struct page *page = alloc_fresh_huge_page();
		if (!page)
			return nr_huge_pages;
		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);
		spin_unlock(&hugetlb_lock);
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page();
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

int is_hugepage_mem_enough(size_t size)
{
	return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	ptep_set_access_flags(vma, address, ptep, entry, 1);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
}


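/*
 * Copy the huge page tables at fork().  For private writable mappings
 * (cow) the source PTE is write-protected before it is copied, so both
 * parent and child take a copy-on-write fault on their next write.
 */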
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

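/*
 * Tear down the huge PTEs in [start, end), dropping the page references
 * and the file_rss accounting, then flush the TLB for the range.
 */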
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		put_page(page);
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}

	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
}

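/*
 * Find the huge page backing the given hugetlbfs index, or allocate a
 * new one and (for shared mappings) insert it into the page cache,
 * charging the hugetlbfs quota.  On success the page is returned locked.
 */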
static struct page *find_or_alloc_huge_page(struct address_space *mapping,
				unsigned long idx, int shared)
{
	struct page *page;
	int err;

retry:
	page = find_lock_page(mapping, idx);
	if (page)
		goto out;

	if (hugetlb_get_quota(mapping))
		goto out;
	page = alloc_huge_page();
	if (!page) {
		hugetlb_put_quota(mapping);
		goto out;
	}

	if (shared) {
		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
		if (err) {
			put_page(page);
			hugetlb_put_quota(mapping);
			if (err == -EEXIST)
				goto retry;
			page = NULL;
		}
	} else {
		/* Caller expects a locked page */
		lock_page(page);
	}
out:
	return page;
}

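/*
 * Handle a write fault on a read-only huge PTE.  If we hold the only
 * reference to the page the PTE is simply made writable; otherwise a
 * new huge page is allocated, copied and mapped in its place.
 */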
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int i, avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page();

	if (!new_page) {
		page_cache_release(old_page);

		/* Logically this is OOM, not a SIGBUS, but an OOM
		 * could cause the kernel to go killing other
		 * processes which won't help the hugepage situation
		 * at all (?) */
		return VM_FAULT_SIGBUS;
	}

	spin_unlock(&mm->page_table_lock);
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
		copy_user_highpage(new_page + i, old_page + i,
				   address + i*PAGE_SIZE);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
	page = find_or_alloc_huge_page(mapping, idx,
			vma->vm_flags & VM_SHARED);
	if (!page)
		goto out;

	BUG_ON(!PageLocked(page));

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	entry = *ptep;
	if (pte_none(entry))
		return hugetlb_no_page(mm, vma, address, ptep, write_access);

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);

	return ret;
}

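/*
 * The hugetlb side of get_user_pages(): walk the range, faulting in
 * missing pages, and fill the pages[]/vmas[] arrays one PAGE_SIZE step
 * at a time.
 */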
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	vpfn = vaddr/PAGE_SIZE;
	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		if (pages) {
			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}