/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/*
	 * vpfn % (pages per huge page) selects the 4 KiB subpage of
	 * the huge page that contains 'address'.
	 */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}
#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

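/*
 * x86 marks a huge mapping by setting the PSE bit (_PAGE_PSE) in the
 * PMD or PUD entry: a PMD maps a 2 MiB page (4 MiB without PAE) and a
 * PUD maps a 1 GiB page.
 */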
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
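/*
 * Bottom-up search: scan upward from the legacy mmap base for a free
 * gap of 'len' bytes.  align_mask clears the address bits between
 * PAGE_SHIFT and the huge page shift, so vm_unmapped_area() only
 * returns huge-page-aligned addresses.
 */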
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

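/*
 * Top-down search: scan downward from the process's mmap base, the
 * default layout on modern x86, with a bottom-up fallback described
 * below.
 */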
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

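/*
 * Arch hook used by hugetlbfs mmap(): validate that 'len' is a
 * multiple of the huge page size, honour MAP_FIXED and the caller's
 * address hint, then dispatch to the bottom-up or top-down search
 * matching the mm's layout.
 */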
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
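/*
 * Userspace typically reaches this path via hugetlbfs or mmap() with
 * MAP_HUGETLB, e.g. (userspace sketch, not part of this file):
 *
 *	p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * With a NULL hint, placement is decided entirely by the helpers above.
 */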
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
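/*
 * Parse the "hugepagesz=" kernel command line option, e.g. boot with
 * "hugepagesz=1G hugepages=4" to reserve four 1 GiB pages.  2 MiB
 * (PMD-level) pages are always supported; 1 GiB (PUD-level) pages
 * additionally require the CPU's gbpages feature (pdpe1gb in CPUID).
 */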
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#ifdef CONFIG_CMA
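/*
 * Runtime allocation of gigantic pages is driven through sysfs, e.g.:
 *
 *	echo 2 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 *
 * Register the 1 GiB hstate unconditionally here so that interface
 * exists even when no "hugepagesz=1G" was given on the command line.
 */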
static __init int gigantic_pages_init(void)
{
	/* With CMA we can allocate gigantic pages at runtime */
	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CMA */
#endif /* CONFIG_X86_64 */