/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static DEFINE_SPINLOCK(hugetlb_lock);

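/*
 * hugetlb_lock serializes all updates to the pool counters above and
 * to the per-node freelists.
 *
 * enqueue_huge_page() returns a huge page to its node's freelist;
 * the caller must hold hugetlb_lock.
 */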
static void enqueue_huge_page(struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &hugepage_freelists[nid]);
        free_huge_pages++;
        free_huge_pages_node[nid]++;
}

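/*
 * Take a free huge page off the pool, preferring the local node and
 * falling back to a linear scan of all nodes; called with hugetlb_lock
 * held.  Returns NULL if the pool is empty.
 */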
static struct page *dequeue_huge_page(void)
{
        int nid = numa_node_id();
        struct page *page = NULL;

        if (list_empty(&hugepage_freelists[nid])) {
                for (nid = 0; nid < MAX_NUMNODES; ++nid)
                        if (!list_empty(&hugepage_freelists[nid]))
                                break;
        }
        if (nid >= 0 && nid < MAX_NUMNODES &&
            !list_empty(&hugepage_freelists[nid])) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
                free_huge_pages--;
                free_huge_pages_node[nid]--;
        }
        return page;
}

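/*
 * Allocate a fresh HUGETLB_PAGE_ORDER compound page from the buddy
 * allocator, round-robining across the online nodes so the pool is
 * spread evenly.
 */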
static struct page *alloc_fresh_huge_page(void)
{
        static int nid = 0;
        struct page *page;
        page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
                                HUGETLB_PAGE_ORDER);
        nid = (nid + 1) % num_online_nodes();
        if (page) {
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
        }
        return page;
}

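/*
 * free_huge_page() is the compound-page destructor for huge pages:
 * alloc_huge_page() stashes its address in page[1].mapping, and the
 * final put_page() on the compound page invokes it instead of freeing
 * to the buddy allocator, so the page goes back onto the hugetlb
 * freelists.
 */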
void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));

        INIT_LIST_HEAD(&page->lru);
        page[1].mapping = NULL;

        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
        spin_unlock(&hugetlb_lock);
}

struct page *alloc_huge_page(void)
{
        struct page *page;
        int i;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page();
        if (!page) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        }
        spin_unlock(&hugetlb_lock);
        set_page_count(page, 1);
        page[1].mapping = (void *)free_huge_page;
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
                clear_highpage(&page[i]);
        return page;
}

static int __init hugetlb_init(void)
{
        unsigned long i;
        struct page *page;

        if (HPAGE_SHIFT == 0)
                return 0;

        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&hugepage_freelists[i]);

        for (i = 0; i < max_huge_pages; ++i) {
                page = alloc_fresh_huge_page();
                if (!page)
                        break;
                spin_lock(&hugetlb_lock);
                enqueue_huge_page(page);
                spin_unlock(&hugetlb_lock);
        }
        max_huge_pages = free_huge_pages = nr_huge_pages = i;
        printk("Total HugeTLB memory allocated, %lu\n", free_huge_pages);
        return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%lu", &max_huge_pages) <= 0)
                max_huge_pages = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);

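/*
 * The pool is sized at boot with the "hugepages=N" command line option
 * parsed above, and can be resized at runtime through the
 * vm.nr_hugepages sysctl, e.g.:
 *
 *     echo 64 > /proc/sys/vm/nr_hugepages
 */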
#ifdef CONFIG_SYSCTL
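/*
 * Hand a huge page back to the buddy allocator: take it out of the
 * pool counters, reset the per-page flags and reference counts, then
 * free the whole HUGETLB_PAGE_ORDER block.
 */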
static void update_and_free_page(struct page *page)
{
        int i;
        nr_huge_pages--;
        nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
                set_page_count(&page[i], 0);
        }
        set_page_count(page, 1);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
        int i, nid;
        for (i = 0; i < MAX_NUMNODES; ++i) {
                struct page *page, *next;
                list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(page);
                        nid = page_zone(page)->zone_pgdat->node_id;
                        free_huge_pages--;
                        free_huge_pages_node[nid]--;
                        if (count >= nr_huge_pages)
                                return;
                }
        }
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

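/*
 * Grow or shrink the pool to @count pages.  Growing stops early if the
 * buddy allocator cannot supply more huge pages; shrinking frees lowmem
 * pages first (on highmem machines) and can only release pages sitting
 * on the freelists.  Returns the resulting pool size.
 */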
static unsigned long set_max_huge_pages(unsigned long count)
{
        while (count > nr_huge_pages) {
                struct page *page = alloc_fresh_huge_page();
                if (!page)
                        return nr_huge_pages;
                spin_lock(&hugetlb_lock);
                enqueue_huge_page(page);
                spin_unlock(&hugetlb_lock);
        }
        if (count >= nr_huge_pages)
                return nr_huge_pages;

        spin_lock(&hugetlb_lock);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
                struct page *page = dequeue_huge_page();
                if (!page)
                        break;
                update_and_free_page(page);
        }
        spin_unlock(&hugetlb_lock);
        return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
{
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
        max_huge_pages = set_max_huge_pages(max_huge_pages);
        return 0;
}
#endif /* CONFIG_SYSCTL */

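/*
 * Back the "HugePages_*" lines in /proc/meminfo.  With 2 MB huge pages
 * and a 64-page pool, for example, this produces output along the
 * lines of:
 *
 *     HugePages_Total:    64
 *     HugePages_Free:     64
 *     Hugepagesize:  2048 kB
 */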
int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                "HugePages_Total: %5lu\n"
                "HugePages_Free: %5lu\n"
                "Hugepagesize: %5lu kB\n",
                nr_huge_pages,
                free_huge_pages,
                HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free: %5u\n",
                nid, nr_huge_pages_node[nid],
                nid, free_huge_pages_node[nid]);
}

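/*
 * ~HPAGE_MASK is HPAGE_SIZE - 1, so the division below rounds the
 * request up to whole huge pages: asking for 5 MB with 2 MB huge
 * pages, for instance, needs (5 MB + 2 MB - 1) / 2 MB = 3 free huge
 * pages.
 */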
int is_hugepage_mem_enough(size_t size)
{
        return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
        return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
                                unsigned long address, int *unused)
{
        BUG();
        return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .nopage = hugetlb_nopage,
};

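/*
 * Build the huge-page PTE for @page under @vma's protection: writable
 * mappings get a writable, dirty PTE, everything else a write-protected
 * one, and the result is marked young and huge.
 */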
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page)
{
        pte_t entry;

        if (vma->vm_flags & VM_WRITE) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}

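/*
 * Called at fork() time to share the parent's huge pages with the
 * child: for each populated huge PTE in the source VMA, take a
 * reference on the page, charge the child's rss, and install the same
 * PTE in the child's page tables.
 */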
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;

                page = pte_page(pte);
                put_page(page);
                add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
        }

        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
}

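/*
 * Look up the huge page at index @idx in @mapping, allocating and
 * inserting one if it is not already present.  On a racing insert
 * (-EEXIST from add_to_page_cache) the lookup is simply retried.
 * Returns the page locked, or NULL on truncation, quota or allocation
 * failure.
 */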
static struct page *find_lock_huge_page(struct address_space *mapping,
                                        unsigned long idx)
{
        struct page *page;
        int err;
        struct inode *inode = mapping->host;
        unsigned long size;

retry:
        page = find_lock_page(mapping, idx);
        if (page)
                goto out;

        /* Check to make sure the mapping hasn't been truncated */
        size = i_size_read(inode) >> HPAGE_SHIFT;
        if (idx >= size)
                goto out;

        if (hugetlb_get_quota(mapping))
                goto out;
        page = alloc_huge_page();
        if (!page) {
                hugetlb_put_quota(mapping);
                goto out;
        }

        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
        if (err) {
                put_page(page);
                hugetlb_put_quota(mapping);
                if (err == -EEXIST)
                        goto retry;
                page = NULL;
        }
out:
        return page;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                  unsigned long address, int write_access)
{
        int ret = VM_FAULT_SIGBUS;
        unsigned long idx;
        unsigned long size;
        pte_t *pte;
        struct page *page;
        struct address_space *mapping;

        pte = huge_pte_alloc(mm, address);
        if (!pte)
                goto out;

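        /*
         * The page-cache index of the backing huge page: the fault
         * offset within the VMA in huge-page units, plus the VMA's
         * file offset converted from small-page to huge-page units.
         */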
        mapping = vma->vm_file->f_mapping;
        idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
                + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
        page = find_lock_huge_page(mapping, idx);
        if (!page)
                goto out;

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
        if (idx >= size)
                goto backout;

        ret = VM_FAULT_MINOR;
        if (!pte_none(*pte))
                goto backout;

        add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
        set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        hugetlb_put_quota(mapping);
        unlock_page(page);
        put_page(page);
        goto out;
}

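/*
 * The get_user_pages() path for hugetlb VMAs: walk the request a small
 * page at a time, faulting huge pages in as needed, and hand back the
 * constituent small pages of each huge page.
 */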
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vpfn, vaddr = *position;
        int remainder = *length;

        vpfn = vaddr/PAGE_SIZE;
        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_t entries
                 * for each hugepage.  We have to make sure we get the
                 * first one, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

                if (!pte || pte_none(*pte)) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, 0);
                        spin_lock(&mm->page_table_lock);
                        if (ret == VM_FAULT_MINOR)
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                if (pages) {
                        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++vpfn;
                --remainder;
                ++i;
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
        *position = vaddr;

        return i;
}