/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

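/*
 * Take a free huge page off the per-node free lists, preferring nodes
 * allowed by the VMA's memory policy and the current cpuset.  Returns
 * NULL if no suitable page is available.  Called with hugetlb_lock held.
 */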
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

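/*
 * Allocate a fresh huge page from the buddy allocator, round-robining
 * across online nodes via the static prev_nid.  The final put_page()
 * hands the page to the hugetlb pool through its compound destructor,
 * free_huge_page().  Returns 1 on success, 0 on failure.
 */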
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	int nid;

	/*
	 * Copy static prev_nid to local nid, work on that, then copy it
	 * back to prev_nid afterwards: otherwise there's a window in which
	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
	 * But we don't need to use a spin_lock here: it really doesn't
	 * matter if occasionally a racer chooses the same nid as we do.
	 */
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

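/*
 * Allocate a huge page for a fault in the given VMA.  Shared (VM_MAYSHARE)
 * mappings consume one of their reserved pages; private mappings may only
 * use pages that are not needed to back existing reservations.
 */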
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

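/*
 * Resize the huge page pool to 'count' pages: allocate fresh pages while
 * below the target, then free surplus pages, never shrinking below the
 * number of currently reserved pages.  Returns the resulting pool size.
 */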
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}

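/*
 * Copy the huge page table entries from the parent mm to the child at
 * fork time.  For private (copy-on-write) mappings the source PTEs are
 * write-protected first, so a later write by either process faults and
 * gets its own copy via hugetlb_cow().
 */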
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by the per-file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-NULL
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

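/*
 * Handle a copy-on-write fault on a huge page.  If we hold the only
 * reference, just make the existing mapping writable; otherwise allocate
 * a new huge page, copy the data, and switch the PTE over to it.
 * Called with mm->page_table_lock held; the lock is dropped around the
 * copy and the PTE is re-checked afterwards.
 */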
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

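/*
 * Fault in a huge page that has no PTE yet: find it in the hugetlbfs page
 * cache, or allocate and zero a fresh one (adding it to the page cache for
 * shared mappings), then install the PTE.  Private write faults do the COW
 * immediately to avoid taking a second fault.
 */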
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

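/*
 * Top-level hugetlb fault handler.  Instantiation of new pages is
 * serialized by hugetlb_instantiation_mutex; a write fault on an existing
 * read-only mapping is handled by hugetlb_cow().
 */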
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

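/*
 * Walk a user address range on behalf of get_user_pages(), faulting huge
 * pages in as needed and filling in the pages[] and vmas[] arrays.
 * pfn_offset steps through the subpages of each compound huge page so the
 * PTE for a huge page only has to be looked up once.
 */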
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

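/*
 * Huge page reservations for a hugetlbfs inode are tracked as a sorted
 * list of file_region ranges [from, to) on inode->i_mapping->private_list
 * (see hugetlb_reserve_pages() below).  region_chg() reports how many new
 * pages a reservation would need, region_add() commits it, and
 * region_truncate() drops reservations beyond a given offset.
 */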
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (nrg == 0)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

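/*
 * Charge 'delta' huge pages against the global reservation, succeeding
 * only if enough free huge pages remain to back all reservations.
 */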
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpuset is configured, it breaks strict hugetlb page
	 * reservation because the accounting is done on a global variable.
	 * Such a reservation is completely rubbish in the presence of
	 * cpusets because the reservation is not checked against page
	 * availability for the current cpuset. An application can still
	 * potentially be OOM-killed by the kernel for lack of free hugetlb
	 * pages in the cpuset that the task belongs to. Enforcing strict
	 * accounting with cpusets is almost impossible (or too ugly)
	 * because cpusets are too fluid: tasks and memory nodes can be
	 * moved between cpusets dynamically.
	 *
	 * The change of semantics for shared hugetlb mappings with cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against the current free page
	 * availability as a best attempt, hopefully minimizing the impact
	 * of the semantic change that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}