/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
        int i;

        might_sleep();
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
                cond_resched();
                clear_user_highpage(page + i, addr);
        }
}

static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
{
        int i;

        might_sleep();
        for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
}

static void enqueue_huge_page(struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages++;
        free_huge_pages_node[nid]++;
}

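/*
 * Take a free huge page off the per-node freelists, preferring zones
 * allowed by the vma's memory policy and cpuset.  Called with
 * hugetlb_lock held; returns NULL if no suitable page is free.
 */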
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
{
        int nid = numa_node_id();
        struct page *page = NULL;
        struct zonelist *zonelist = huge_zonelist(vma, address);
        struct zone **z;

        for (z = zonelist->zones; *z; z++) {
                nid = zone_to_nid(*z);
                if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
                    !list_empty(&hugepage_freelists[nid]))
                        break;
        }

        if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
                free_huge_pages--;
                free_huge_pages_node[nid]--;
        }
        return page;
}

static void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));

        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
        spin_unlock(&hugetlb_lock);
}

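/*
 * Allocate a fresh huge page from the buddy allocator and release it
 * into the hugepage pool.  Allocations rotate round-robin across the
 * online nodes.  Returns 1 on success, 0 on failure.
 */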
static int alloc_fresh_huge_page(void)
{
        static int nid = 0;
        struct page *page;
        page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
        nid = next_node(nid, node_online_map);
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        if (page) {
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
                spin_unlock(&hugetlb_lock);
                put_page(page); /* free it into the hugepage allocator */
                return 1;
        }
        return 0;
}

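/*
 * Allocate a huge page from the pool for a fault in the given vma.
 * Shared mappings draw on their reservation; private mappings may only
 * use pages that are not reserved.  Returns NULL if nothing is available.
 */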
static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr)
{
        struct page *page;

        spin_lock(&hugetlb_lock);
        if (vma->vm_flags & VM_MAYSHARE)
                resv_huge_pages--;
        else if (free_huge_pages <= resv_huge_pages)
                goto fail;

        page = dequeue_huge_page(vma, addr);
        if (!page)
                goto fail;

        spin_unlock(&hugetlb_lock);
        set_page_refcounted(page);
        return page;

fail:
        spin_unlock(&hugetlb_lock);
        return NULL;
}

static int __init hugetlb_init(void)
{
        unsigned long i;

        if (HPAGE_SHIFT == 0)
                return 0;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                if (!alloc_fresh_huge_page())
                        break;
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
        return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
        int i;
        nr_huge_pages--;
        nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        page[1].lru.next = NULL;
        set_page_refcounted(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        free_huge_pages--;
                        free_huge_pages_node[page_to_nid(page)]--;
                        if (count >= nr_huge_pages)
                                return;
                }
        }
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

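/*
 * Grow or shrink the hugepage pool to 'count' pages, never shrinking
 * below the number of reserved pages.  Returns the resulting pool size.
 */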
static unsigned long set_max_huge_pages(unsigned long count)
{
        while (count > nr_huge_pages) {
                if (!alloc_fresh_huge_page())
                        return nr_huge_pages;
        }
        if (count >= nr_huge_pages)
                return nr_huge_pages;

        spin_lock(&hugetlb_lock);
        count = max(count, resv_huge_pages);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
                struct page *page = dequeue_huge_page(NULL, 0);
                if (!page)
                        break;
                update_and_free_page(page);
        }
        spin_unlock(&hugetlb_lock);
        return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "HugePages_Rsvd:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        nr_huge_pages,
                        free_huge_pages,
                        resv_huge_pages,
                        HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
                                unsigned long address, int *unused)
{
        BUG();
        return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                                int writable)
{
        pte_t entry;

        if (writable) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
{
        pte_t entry;

        entry = pte_mkwrite(pte_mkdirty(*ptep));
        ptep_set_access_flags(vma, address, ptep, entry, 1);
        update_mmu_cache(vma, address, entry);
        lazy_mmu_prot_update(entry);
}


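/*
 * Copy the huge page table entries from the parent to the child mm at
 * fork time.  For private mappings the entries are write-protected so
 * that later writes go through copy-on-write.
 */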
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                        if (cow)
                                ptep_set_wrprotect(src, addr, src_pte);
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}

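/*
 * Tear down the huge ptes in [start, end) and release the pages they
 * mapped.  The caller must hold the file's i_mmap_lock; put_page() is
 * deferred until after the TLB has been flushed.
 */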
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;
        struct page *tmp;
        /*
         * A page gathering list, protected by per file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
        LIST_HEAD(page_list);

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                list_add(&page->lru, &page_list);
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
        }
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        /*
         * It is undesirable to test vma->vm_file as it should be non-null
         * for valid hugetlb area. However, vm_file will be NULL in the error
         * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
         * do_mmap_pgoff() nullifies vma->vm_file before calling this function
         * to clean up. Since no pte has actually been setup, it is safe to
         * do nothing in this case.
         */
        if (vma->vm_file) {
                spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
                __unmap_hugepage_range(vma, start, end);
                spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
        }
}

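/*
 * Handle a copy-on-write fault on a huge page.  Called with the mm's
 * page_table_lock held; the lock is dropped around the page copy and
 * the pte is re-checked before being updated.
 */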
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
{
        struct page *old_page, *new_page;
        int avoidcopy;

        old_page = pte_page(pte);

        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
                return VM_FAULT_MINOR;
        }

        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address);

        if (!new_page) {
                page_cache_release(old_page);
                return VM_FAULT_OOM;
        }

        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
        if (likely(pte_same(*ptep, pte))) {
                /* Break COW */
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                new_page = old_page;
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
        return VM_FAULT_MINOR;
}

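/*
 * Fault in a huge page with no pte yet: find it in the page cache or
 * allocate and zero a fresh one, then install the pte.  The page lock
 * and a recheck of i_size guard against racing truncation.
 */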
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
{
        int ret = VM_FAULT_SIGBUS;
        unsigned long idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;

        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
retry:
        page = find_lock_page(mapping, idx);
        if (!page) {
                size = i_size_read(mapping->host) >> HPAGE_SHIFT;
                if (idx >= size)
                        goto out;
                if (hugetlb_get_quota(mapping))
                        goto out;
                page = alloc_huge_page(vma, address);
                if (!page) {
                        hugetlb_put_quota(mapping);
                        ret = VM_FAULT_OOM;
                        goto out;
                }
                clear_huge_page(page, address);

                if (vma->vm_flags & VM_SHARED) {
                        int err;

                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                        if (err) {
                                put_page(page);
                                hugetlb_put_quota(mapping);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }
                } else
                        lock_page(page);
        }

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (idx >= size)
                goto backout;

        ret = VM_FAULT_MINOR;
        if (!pte_none(*ptep))
                goto backout;

        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
        }

        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
}

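/*
 * Main entry point for hugetlb faults.  Missing pages are filled in by
 * hugetlb_no_page(); write faults on read-only ptes are handled by
 * hugetlb_cow().  A global mutex serializes hugepage instantiation.
 */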
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access)
{
        pte_t *ptep;
        pte_t entry;
        int ret;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);

        ptep = huge_pte_alloc(mm, address);
        if (!ptep)
                return VM_FAULT_OOM;

        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = *ptep;
        if (pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
        }

        ret = VM_FAULT_MINOR;

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, *ptep)))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
        mutex_unlock(&hugetlb_instantiation_mutex);

        return ret;
}

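/*
 * Walk the huge ptes for get_user_pages(): fill pages[] and vmas[] for
 * the range starting at *position, faulting pages in as needed, and
 * update *position and *length to reflect what remains.
 */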
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;

        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte)) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
                        if (ret == VM_FAULT_MINOR)
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
                page = pte_page(*pte);
same_page:
                if (pages) {
                        get_page(page);
                        pages[i] = page + pfn_offset;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++pfn_offset;
                --remainder;
                ++i;
                if (vaddr < vma->vm_end && remainder &&
                                pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
                         */
                        goto same_page;
                }
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
        *position = vaddr;

        return i;
}

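/*
 * Apply a new protection to every huge pte in the range, as mprotect()
 * does for normal pages.  Shared pmds are unshared first and the TLB is
 * flushed once at the end.
 */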
void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;
        pte_t *ptep;
        pte_t pte;

        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);

        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
                        lazy_mmu_prot_update(pte);
                }
        }
        spin_unlock(&mm->page_table_lock);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

        flush_tlb_range(vma, start, end);
}

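/*
 * Huge page reservations are tracked per inode as a list of file_region
 * structures on mapping->private_list, each recording a range of file
 * offsets [from, to) that is already reserved.
 */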
struct file_region {
        struct list_head link;
        long from;
        long to;
};

static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher, then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (nrg == 0)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}

static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}

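/*
 * Reserve 'delta' additional huge pages (or return some if delta is
 * negative), failing if the free pool cannot cover the new total.
 */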
static int hugetlb_acct_memory(long delta)
{
        int ret = -ENOMEM;

        spin_lock(&hugetlb_lock);
        if ((delta + resv_huge_pages) <= free_huge_pages) {
                resv_huge_pages += delta;
                ret = 0;
        }
        spin_unlock(&hugetlb_lock);
        return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
        long ret, chg;

        chg = region_chg(&inode->i_mapping->private_list, from, to);
        if (chg < 0)
                return chg;
        ret = hugetlb_acct_memory(chg);
        if (ret < 0)
                return ret;
        region_add(&inode->i_mapping->private_list, from, to);
        return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
        hugetlb_acct_memory(freed - chg);
}