/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
	}
}

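/*
 * Return a hugepage to its node's free list.  Caller must hold
 * hugetlb_lock.
 */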
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

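/*
 * Take a free hugepage from the first zone in the mempolicy zonelist
 * whose node has free hugepages and which the cpuset permits.
 * Caller must hold hugetlb_lock.
 */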
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = (*z)->zone_pgdat->node_id;
		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

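/*
 * Compound page destructor, installed by alloc_fresh_huge_page():
 * called when the last reference to a hugepage is dropped, it returns
 * the page to the hugepage free list rather than the buddy allocator.
 */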
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

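/*
 * Allocate a fresh hugepage-order compound page from the buddy
 * allocator, round-robin across the online nodes.  free_huge_page()
 * is installed as the compound page destructor, so the final
 * put_page() hands the page to the hugepage pool.
 */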
static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = (nid + 1) % num_online_nodes();
	if (page) {
		page[1].lru.next = (void *)free_huge_page;	/* dtor */
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

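/*
 * Allocate a hugepage for the given mapping, drawing on the inode's
 * pre-reserved pool for shared mappings within the reserved range,
 * and on the truly free pool otherwise.
 */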
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page;
	int use_reserve = 0;
	unsigned long idx;

	spin_lock(&hugetlb_lock);

	if (vma->vm_flags & VM_MAYSHARE) {

		/* idx = radix tree index, i.e. offset into file in
		 * HPAGE_SIZE units */
		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

		/* The hugetlbfs specific inode info stores the number
		 * of "guaranteed available" (huge) pages.  That is,
		 * the first 'prereserved_hpages' pages of the inode
		 * are either already instantiated, or have been
		 * pre-reserved (by hugetlb_extend_reservation()). Here
		 * we're in the process of instantiating the page, so
		 * we use this to determine whether to draw from the
		 * pre-reserved pool or the truly free pool. */
		if (idx < HUGETLBFS_I(inode)->prereserved_hpages)
			use_reserve = 1;
	}

	if (!use_reserve) {
		if (free_huge_pages <= reserved_huge_pages)
			goto fail;
	} else {
		BUG_ON(reserved_huge_pages == 0);
		reserved_huge_pages--;
	}

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

 fail:
	WARN_ON(use_reserve);	/* reserved allocations shouldn't fail */
	spin_unlock(&hugetlb_lock);
	return NULL;
}

/* hugetlb_extend_reservation()
 *
 * Ensure that at least 'atleast' hugepages are, and will remain,
 * available to instantiate the first 'atleast' pages of the given
 * inode.  If the inode doesn't already have this many pages reserved
 * or instantiated, set aside some hugepages in the reserved pool to
 * satisfy later faults (or fail now if there aren't enough, rather
 * than getting the SIGBUS later).
 */
int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
			       unsigned long atleast)
{
	struct inode *inode = &info->vfs_inode;
	unsigned long change_in_reserve = 0;
	int ret = 0;

	spin_lock(&hugetlb_lock);
	read_lock_irq(&inode->i_mapping->tree_lock);

	if (info->prereserved_hpages >= atleast)
		goto out;

	/* Because we always call this on shared mappings, none of the
	 * pages beyond info->prereserved_hpages can have been
	 * instantiated, so we need to reserve all of them now. */
	change_in_reserve = atleast - info->prereserved_hpages;

	if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) {
		ret = -ENOMEM;
		goto out;
	}

	reserved_huge_pages += change_in_reserve;
	info->prereserved_hpages = atleast;

 out:
	read_unlock_irq(&inode->i_mapping->tree_lock);
	spin_unlock(&hugetlb_lock);

	return ret;
}

/* hugetlb_truncate_reservation()
 *
 * This returns pages reserved for the given inode to the general free
 * hugepage pool.  If the inode has any pages prereserved, but not
 * instantiated, beyond offset (atmost << HPAGE_SHIFT), then release
 * them.
 */
void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
				  unsigned long atmost)
{
	struct inode *inode = &info->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long idx;
	unsigned long change_in_reserve = 0;
	struct page *page;

	spin_lock(&hugetlb_lock);
	read_lock_irq(&inode->i_mapping->tree_lock);

	if (info->prereserved_hpages <= atmost)
		goto out;

	/* Count pages which were reserved, but not instantiated, and
	 * which we can now release. */
	for (idx = atmost; idx < info->prereserved_hpages; idx++) {
		page = radix_tree_lookup(&mapping->page_tree, idx);
		if (!page)
			/* Pages which are already instantiated can't
			 * be unreserved (and in fact have already
			 * been removed from the reserved pool) */
			change_in_reserve++;
	}

	BUG_ON(reserved_huge_pages < change_in_reserve);
	reserved_huge_pages -= change_in_reserve;
	info->prereserved_hpages = atmost;

 out:
	read_unlock_irq(&inode->i_mapping->tree_lock);
	spin_unlock(&hugetlb_lock);
}

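/*
 * Populate the hugepage pool at boot with max_huge_pages pages, as
 * requested via the "hugepages=" command line parameter handled below.
 */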
static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %lu\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
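/*
 * Give a hugepage back to the buddy allocator: clear the inherited
 * page flags and the compound destructor, restore the refcount, and
 * free the HUGETLB_PAGE_ORDER block.  Caller must hold hugetlb_lock.
 */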
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

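/*
 * When shrinking the pool, free lowmem hugepages first, so that the
 * scarcer non-highmem pages are returned to the buddy allocator before
 * any highmem ones.  Without CONFIG_HIGHMEM this is a no-op.
 */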
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i, nid;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			nid = page_zone(page)->zone_pgdat->node_id;
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

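/*
 * Grow or shrink the hugepage pool to 'count' pages, allocating fresh
 * pages or returning free ones to the buddy allocator as needed, and
 * report how many pages the pool actually holds afterwards.
 */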
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			reserved_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

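/*
 * Construct a huge pte for the given page with the VMA's page
 * protection, marked young; if 'writable', also make it dirty and
 * writable.
 */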
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	ptep_set_access_flags(vma, address, ptep, entry, 1);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
}


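/*
 * Copy a VMA's huge ptes from parent to child at fork().  For private
 * mappings with VM_MAYWRITE (i.e. potential COW), the pte is
 * write-protected first, so both processes fault and copy on the next
 * write.
 */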
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

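/*
 * Unmap the hugepage-aligned range [start, end) from a VMA: clear the
 * huge ptes, drop the page references and rss accounting, and flush
 * the TLB for the range.
 */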
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		put_page(page);
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}

	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
}

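/*
 * Handle a COW fault on a huge pte: if we are the only user of the old
 * page, just make the pte writable; otherwise allocate a new hugepage,
 * copy the contents, and swap it in.  Called (and returns) with the
 * page_table_lock held.
 */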
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

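/*
 * Handle a fault on a huge pte that is not present: find the page in
 * the page cache, or allocate a freshly zeroed hugepage and (for
 * shared mappings) insert it, then fill in the pte.
 */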
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

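/*
 * Main fault entry point for hugetlb VMAs: instantiate a missing huge
 * pte via hugetlb_no_page(), or break COW for a write fault on a
 * read-only pte.
 */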
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

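/*
 * get_user_pages() backend for hugetlb VMAs: walk the huge ptes from
 * *position, faulting pages in as needed, and fill the pages[] and
 * vmas[] arrays with up to *length base-page entries.
 */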
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		get_page(page);
		if (pages)
			pages[i] = page + pfn_offset;

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

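/*
 * mprotect() backend for hugetlb VMAs: rewrite every present huge pte
 * in [address, end) with the new protection, flushing the cache
 * beforehand and the TLB afterwards.
 */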
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, start, end);
}