/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

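/*
 * Pick a free huge page for a fault in @vma at @address: walk the zonelist
 * returned by huge_zonelist() for the mapping's memory policy and take a
 * page from a node's free list, skipping nodes the current cpuset does not
 * allow.  Returns NULL if no allowed node has a free huge page.
 */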
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
		}
	}
	return page;
}

static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

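/*
 * Allocate a fresh huge page from the buddy allocator and release it into
 * the huge page pool.  Allocations are spread round-robin across the online
 * nodes; prev_nid remembers the last node used and is protected by nid_lock.
 * Returns 1 on success, 0 if the allocation failed.
 */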
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	static DEFINE_SPINLOCK(nid_lock);
	int nid;

	spin_lock(&nid_lock);
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;
	spin_unlock(&nid_lock);

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

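/*
 * Take a huge page from the pool for a fault in @vma at @addr.  Shared
 * (VM_MAYSHARE) mappings consume one of the pages reserved for them;
 * private mappings may only use pages that are not backing an existing
 * reservation.  Returns NULL when no page can be provided.
 */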
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

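/*
 * Resize the huge page pool to @count pages: grow it by allocating fresh
 * pages, or shrink it by handing surplus free pages back to the buddy
 * allocator.  The pool is never shrunk below the number of reserved pages,
 * and free pages sitting in low memory are released first
 * (see try_to_free_low()).
 */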
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}


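/*
 * Copy the huge ptes of @vma from the parent mm @src into the child mm
 * @dst at fork time.  For private writable mappings the source ptes are
 * write-protected as well, so both parent and child will COW on their
 * next write.
 */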
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

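/*
 * Tear down the huge ptes in [start, end) and release the pages they
 * mapped.  The caller must hold the file's i_mmap_lock; pages are first
 * gathered on a local list, the TLB range is flushed, and only then are
 * the pages actually dropped.
 */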
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

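/*
 * Break copy-on-write for a huge page: if we are the only user just make
 * the pte writable, otherwise copy the data into a freshly allocated huge
 * page and point the pte at the copy.  Called and returns with
 * mm->page_table_lock held; the lock is dropped around the page copy.
 */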
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

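/*
 * Handle a fault on a huge pte that is not present: look the page up in
 * the page cache or allocate and zero a new one, insert it into the page
 * cache for shared mappings, and install the pte.  Racing truncation is
 * guarded against with the page lock and a recheck of i_size.
 */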
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

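/*
 * Walk the user address range starting at *position and fill in up to
 * *length entries in @pages (and their vmas in @vmas), faulting in huge
 * pages that are not yet present.  Returns the number of entries filled
 * in, or -EFAULT if nothing could be filled in at all.
 */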
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_MAJOR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

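/*
 * Apply @newprot to every huge pte that is already instantiated in
 * [address, end).  Both the file's i_mmap_lock and mm->page_table_lock
 * are held to keep the walk stable, and the TLB range is flushed once
 * at the end.
 */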
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

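/*
 * Huge page reservations for a hugetlbfs file are tracked as a list of
 * [from, to) file_region ranges, in huge page units, hanging off the
 * inode's mapping->private_list.  region_chg() returns how many additional
 * pages a reservation would need (pre-allocating a record when necessary),
 * region_add() records the reservation, and region_truncate() drops
 * everything beyond a given offset.
 */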
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

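/*
 * Try to charge @delta more huge pages against the reserve.  Succeeds only
 * if the free pool is large enough to back the enlarged reservation;
 * returns 0 on success and -ENOMEM otherwise.
 */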
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpusets are configured, strict hugetlb page reservation
	 * breaks down because the accounting is done on global counters.
	 * The reservation is essentially meaningless in the presence of
	 * cpusets: it is never checked against the page availability of
	 * the task's current cpuset, so the application can still be
	 * OOM-killed by the kernel when that cpuset runs out of free huge
	 * pages.  Enforcing strict accounting per cpuset is close to
	 * impossible (or too ugly) because cpusets are fluid: tasks and
	 * memory nodes can be moved between them at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable.  To preserve as much of the old behaviour as
	 * possible, we fall back to checking against the current free page
	 * availability, as a best effort to minimize the impact of the
	 * semantics that cpusets impose.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}