/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

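/*
 * Zero a huge page one base page at a time, rescheduling between base
 * pages so that clearing a large huge page does not hog the CPU.
 */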
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

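/*
 * Return a huge page to the per-node free list and update the global
 * and per-node free page counters.  Caller must hold hugetlb_lock.
 */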
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

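/*
 * Take a free huge page from the first node in the zonelist that is
 * allowed by the cpuset and has pages available.  Returns NULL when no
 * suitable page exists.  Caller must hold hugetlb_lock.
 */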
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

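/*
 * Compound page destructor: called when the last reference to a huge
 * page is dropped; puts the page back on the free list.
 */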
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

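/*
 * Allocate a new huge page from the buddy allocator, round-robining
 * across online nodes, and hand it to the hugepage pool.  Returns 1 on
 * success, 0 on failure.
 */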
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	static DEFINE_SPINLOCK(nid_lock);
	int nid;

	spin_lock(&nid_lock);
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;
	spin_unlock(&nid_lock);

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

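/*
 * Allocate a huge page from the pool for a fault in the given VMA,
 * honouring the reservation accounting for shared mappings.
 */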
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
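/*
 * Give a pool page back to the buddy allocator, clearing the page
 * flags the hugepage code may have left set.
 */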
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
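/*
 * When shrinking the pool, free lowmem pages first so that the scarcer
 * !highmem pages are preferentially released.
 */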
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

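/*
 * Grow or shrink the hugepage pool to 'count' pages (never below the
 * reserved count) and return the resulting pool size.
 */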
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

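/*
 * Build the huge PTE for a page, marking it writable only when the
 * caller asks for write access.
 */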
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}


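/*
 * Copy the hugetlb page range of a source mm into the destination mm at
 * fork time.  For private mappings the source PTEs are write-protected
 * so that later writes trigger copy-on-write.
 */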
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

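/*
 * Tear down the huge PTEs in [start, end), gathering the pages on a
 * local list and releasing them only after the TLB has been flushed.
 */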
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

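/*
 * Handle a copy-on-write fault on a huge page: reuse the page when we
 * hold the only reference, otherwise copy into a freshly allocated huge
 * page.  Called with mm->page_table_lock held.
 */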
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

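/*
 * Fault in a huge page that has no PTE yet: look it up in (or add it to)
 * the page cache for shared mappings, or allocate an anonymous copy for
 * private ones, then install the new PTE.
 */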
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

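/*
 * Top-level hugetlb fault handler.  Instantiation of new pages is
 * serialized by hugetlb_instantiation_mutex; write faults on existing
 * read-only PTEs are handled as copy-on-write.
 */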
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

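/*
 * get_user_pages() helper for hugetlb VMAs: walk the huge PTEs for the
 * requested range, faulting pages in as needed, and fill in the pages[]
 * and vmas[] arrays.
 */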
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

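/*
 * mprotect() support for hugetlb VMAs: rewrite every huge PTE in the
 * range with the new protection bits and flush the TLB.
 */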
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

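/*
 * Reserved huge pages for a mapping are tracked as a list of
 * [from, to) file regions hanging off the inode's address space.
 */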
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

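/*
 * Return how many additional pages a reservation of [f, t) would need
 * beyond the regions already recorded, inserting a placeholder region
 * when none covers the range yet.
 */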
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

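/*
 * Drop all reserved regions from 'end' onwards and return the number of
 * reserved pages released.
 */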
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

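/*
 * Adjust the global reservation by 'delta' pages, failing with -ENOMEM
 * when the free pool cannot cover an increase.
 */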
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

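/*
 * Reserve huge pages for the file range [from, to) at mmap() time so
 * that later faults are guaranteed to find a page.
 */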
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpusets are configured, they break strict hugetlb page
	 * reservation: the accounting is done on a global variable, so the
	 * reservation is not checked against page availability for the
	 * current cpuset.  An application can therefore still be OOM'ed by
	 * the kernel if the cpuset it runs in lacks free hugetlb pages.
	 * Enforcing strict accounting per cpuset is close to impossible
	 * (or too ugly) because cpusets are fluid: tasks and memory nodes
	 * can be moved between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable.  However, to preserve some of the semantics, we
	 * fall back to checking against the current free page availability
	 * as a best-effort attempt, hopefully minimizing the impact of the
	 * semantic change that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}