/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a shareable pmd page for hugetlb. In any case it calls
 * pmd_alloc() and returns the corresponding pte. While this is not necessary
 * for the !shared pmd case because we can allocate the pmd later as well, it
 * makes the code much cleaner. pmd allocation is essential for the shared
 * case because the pud has to be populated inside the same i_mmap_mutex
 * section - otherwise racing tasks could either miss the sharing (see
 * huge_pte_offset) or select a bad pmd for sharing.
 */
static pte_t *
huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	mutex_unlock(&mapping->i_mmap_mutex);
	return pte;
}

/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is ref counted at the time of mapping. If the pte is
 * shared (indicated by page_count > 1), unmapping is achieved by clearing the
 * pud and decrementing the ref count. If the count == 1, the pte page is not
 * shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
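
/*
 * Note on the *addr adjustment above: on success, *addr is set to one huge
 * page before the next HPAGE_SIZE * PTRS_PER_PTE boundary. A typical caller
 * iterating over a range in huge-page steps can therefore do (sketch of a
 * caller, not code from this file):
 *
 *	if (huge_pmd_unshare(mm, &address, ptep))
 *		continue;
 *
 * and the loop's normal increment then skips the rest of the range covered
 * by the now-unshared pmd page.
 */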

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_large(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *)pmd;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	/* virtual page frame number, used to index into the huge page below */
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

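/*
 * A pmd or pud entry with _PAGE_PSE set maps a large page directly instead
 * of pointing to a lower-level page table.
 */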
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
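/*
 * Bottom-up search for a free, huge-page-aligned range, mirroring the
 * generic arch_get_unmapped_area() but stepping in huge-page-aligned
 * increments. The mm's free_area_cache and cached_hole_size fields are used
 * purely as search hints.
 */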
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}

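/*
 * Top-down variant: search below mm->mmap_base, mirroring
 * arch_get_unmapped_area_topdown(). If no hole is found there, fall back to
 * the bottom-up search above.
 */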
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long base = mm->mmap_base;
	unsigned long addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	unsigned long start_addr;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	start_addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma)
			return addr;

		if (addr + len <= vma->vm_start) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else if (mm->free_area_cache == vma->vm_end) {
			/* pull free_area_cache down to the first hole */
			mm->free_area_cache = vma->vm_start;
			mm->cached_hole_size = largest_hole;
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (start_addr != base) {
		mm->free_area_cache = base;
		largest_hole = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
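
/*
 * When HAVE_ARCH_HUGETLB_UNMAPPED_AREA is defined, hugetlbfs is expected to
 * use this helper as its ->get_unmapped_area implementation; that wiring
 * lives in fs/hugetlbfs, outside this file.
 */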

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/

#ifdef CONFIG_X86_64
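/*
 * Parse the "hugepagesz=" kernel command-line option. As an illustrative
 * example (not specific to this file), booting with "hugepagesz=1G
 * hugepages=4" selects the 1GB hstate and asks for four such pages, provided
 * the CPU supports GB pages.
 */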
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif