/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

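/*
 * Take a free huge page off the per-node free lists, preferring nodes
 * in the zonelist returned by huge_zonelist() (which honours the VMA's
 * NUMA memory policy).  Caller must hold hugetlb_lock.
 */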
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = (*z)->zone_pgdat->node_id;
		if (!list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

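/*
 * Allocate a fresh huge page from the buddy allocator, round-robining
 * across the online nodes so the pool is spread evenly.  Only the pool
 * counters are updated here; the caller enqueues the page itself.
 */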
static struct page *alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = (nid + 1) % num_online_nodes();
	if (page) {
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}
	return page;
}

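/*
 * Called when the last reference to a huge page is dropped: the page
 * goes back on the free lists rather than to the buddy allocator.
 */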
void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);
	page[1].mapping = NULL;

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

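/*
 * Hand out a zeroed huge page from the pool, with free_huge_page()
 * installed as its compound-page destructor.  Returns NULL if the
 * pool is empty.
 */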
struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int i;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	}
	spin_unlock(&hugetlb_lock);
	set_page_count(page, 1);
	page[1].mapping = (void *)free_huge_page;
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
		clear_highpage(&page[i]);
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;
	struct page *page;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		page = alloc_fresh_huge_page();
		if (!page)
			break;
		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);
		spin_unlock(&hugetlb_lock);
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n",
			free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
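/*
 * Give a huge page back to the buddy allocator: fix up the flags and
 * reference counts of the constituent pages, then free the whole
 * HUGETLB_PAGE_ORDER block.  Caller must hold hugetlb_lock.
 */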
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
		set_page_count(&page[i], 0);
	}
	set_page_count(page, 1);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i, nid;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			nid = page_zone(page)->zone_pgdat->node_id;
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

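/*
 * Grow or shrink the huge page pool to @count pages.  Shrinking can
 * only reclaim pages that are on the free lists, so the pool may end
 * up larger than requested; the resulting size is returned.
 */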
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		struct page *page = alloc_fresh_huge_page();
		if (!page)
			return nr_huge_pages;
		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);
		spin_unlock(&hugetlb_lock);
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

int is_hugepage_mem_enough(size_t size)
{
	return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

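/*
 * Build the huge PTE for @page: writable and dirty for writable
 * mappings, write-protected otherwise, and always young and huge.
 */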
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	ptep_set_access_flags(vma, address, ptep, entry, 1);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
}

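/*
 * Copy the huge page mappings from @src to @dst at fork time.  For
 * private mappings both copies are write-protected, so the first
 * write in either mm triggers COW in hugetlb_cow().
 */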
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		put_page(page);
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}

	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
}

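/*
 * Look the page up in the mapping's page cache; if it is not there,
 * allocate one (charged against the hugetlbfs quota) and, for shared
 * mappings, insert it into the page cache.  Returns the page locked,
 * or NULL on failure.
 */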
static struct page *find_or_alloc_huge_page(struct vm_area_struct *vma,
		unsigned long addr, struct address_space *mapping,
		unsigned long idx, int shared)
{
	struct page *page;
	int err;

retry:
	page = find_lock_page(mapping, idx);
	if (page)
		goto out;

	if (hugetlb_get_quota(mapping))
		goto out;
	page = alloc_huge_page(vma, addr);
	if (!page) {
		hugetlb_put_quota(mapping);
		goto out;
	}

	if (shared) {
		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
		if (err) {
			put_page(page);
			hugetlb_put_quota(mapping);
			if (err == -EEXIST)
				goto retry;
			page = NULL;
		}
	} else {
		/* Caller expects a locked page */
		lock_page(page);
	}
out:
	return page;
}

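/*
 * Break COW on a huge page: allocate a private copy, duplicate the old
 * page into it and repoint the PTE.  If we are the only user of the
 * old page we can simply make the existing PTE writable instead.
 * Called with mm->page_table_lock held; drops and retakes it around
 * the copy.
 */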
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int i, avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);

		/* Logically this is OOM, not a SIGBUS, but an OOM
		 * could cause the kernel to go killing other
		 * processes which won't help the hugepage situation
		 * at all (?) */
		return VM_FAULT_SIGBUS;
	}

	spin_unlock(&mm->page_table_lock);
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
		copy_user_highpage(new_page + i, old_page + i,
				address + i*PAGE_SIZE);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

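/*
 * Handle a fault on a not-present huge PTE: find or create the backing
 * page, size-check against the inode, and install the new PTE.  For a
 * write fault on a private mapping, the COW copy is done immediately
 * to avoid taking a second fault.
 */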
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
	page = find_or_alloc_huge_page(vma, address, mapping, idx,
			vma->vm_flags & VM_SHARED);
	if (!page)
		goto out;

	BUG_ON(!PageLocked(page));

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

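/*
 * Main huge page fault entry point, called from handle_mm_fault():
 * allocates the huge PTE if needed, then dispatches to hugetlb_no_page()
 * for missing pages or hugetlb_cow() for write faults on read-only PTEs.
 */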
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	entry = *ptep;
	if (pte_none(entry))
		return hugetlb_no_page(mm, vma, address, ptep, write_access);

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);

	return ret;
}

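/*
 * get_user_pages() helper for hugetlb VMAs: walk the range, faulting
 * pages in as needed, and fill the pages[]/vmas[] arrays one small
 * (PAGE_SIZE) entry at a time.
 */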
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	vpfn = vaddr/PAGE_SIZE;
	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		if (pages) {
			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}