/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

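/*
 * Take a free huge page from the free list of a node allowed by the VMA's
 * memory policy and the current cpuset, and update the free page counters.
 * Returns NULL if no suitable page is free.  Caller must hold hugetlb_lock.
 */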
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}

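/*
 * Compound page destructor, installed by alloc_fresh_huge_page(): when the
 * last reference to a huge page is dropped, return it to the per-node free
 * list instead of the buddy allocator.
 */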
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

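/*
 * Allocate a fresh huge page from the buddy allocator, spreading the
 * allocations round-robin over the online nodes, and release it into the
 * hugepage pool.  Returns 1 on success, 0 on failure.
 */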
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	int nid;

	/*
	 * Copy static prev_nid to local nid, work on that, then copy it
	 * back to prev_nid afterwards: otherwise there's a window in which
	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
	 * But we don't need to use a spin_lock here: it really doesn't
	 * matter if occasionally a racer chooses the same nid as we do.
	 */
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

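/*
 * Allocate a huge page from the pool for the given mapping.  Shared
 * (VM_MAYSHARE) mappings consume their reservation; private mappings may
 * only dip into pages that are not reserved.  Returns NULL when the pool
 * cannot satisfy the request.
 */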
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
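/*
 * Remove a huge page from the pool and hand it back to the buddy allocator,
 * clearing any page flags left over from its time in the pool.
 */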
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

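/*
 * Grow or shrink the hugepage pool towards @count pages: allocate fresh
 * pages while the pool is too small, then free surplus pages (never going
 * below the reserved count) while it is too large.  Returns the resulting
 * pool size.
 */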
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by the per-file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

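/*
 * Handle a copy-on-write fault on a huge page.  If we are the only user of
 * the old page it is simply made writable; otherwise a new huge page is
 * allocated, the data is copied, and the PTE is switched over to the copy.
 * Called and returns with mm->page_table_lock held, although the lock is
 * dropped temporarily while the copy is made.
 */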
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

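/*
 * Handle a fault on a huge page that is not yet present: look the page up
 * in (or add it to) the page cache for shared mappings, or allocate and
 * zero a fresh page for private mappings, then install the new PTE.
 */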
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

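/*
 * Top-level hugetlb fault handler.  Faults on not-present pages are passed
 * to hugetlb_no_page(); write faults on read-only PTEs are handled as
 * copy-on-write via hugetlb_cow().
 */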
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

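/*
 * Back end of get_user_pages() for hugetlb VMAs: walk the requested range,
 * faulting in huge pages as needed, and fill in the pages[] and vmas[]
 * arrays one PAGE_SIZE subpage at a time.
 */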
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

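/*
 * Change the protection of every huge PTE in the given range to @newprot
 * (the hugetlb counterpart of mprotect), unsharing any shared page-table
 * pages along the way and flushing the TLB when done.
 */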
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

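/*
 * Hugepage reservations are tracked as a list of struct file_region hanging
 * off the mapping's private_list: each region records a range of the file
 * for which huge page reservations have already been taken.
 */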
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

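/*
 * Try to charge @delta huge pages against the global reservation: the
 * charge succeeds only if enough free huge pages exist to back it.
 */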
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

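/*
 * Reserve huge pages for the file range [from, to): work out how many
 * additional pages the range needs via the region list, charge them against
 * the pool, and then record the new reservation.
 */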
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpusets are configured, they break the strict hugetlb page
	 * reservation, as the accounting is done on a global variable. Such
	 * a reservation is essentially worthless in the presence of cpusets,
	 * because it is never checked against page availability for the
	 * current cpuset: an application can still be OOM-killed by the
	 * kernel when the cpuset it runs in has no free huge pages left.
	 * Enforcing strict accounting with cpusets is almost impossible (or
	 * too ugly), because cpusets are too fluid: tasks and memory nodes
	 * can be moved between cpusets dynamically.
	 *
	 * This change of semantics for shared hugetlb mappings with cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against the current free page
	 * availability as a best attempt, hopefully minimizing the impact of
	 * the changed semantics that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}