/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

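/* Bottom-up search, used by 64-bit tasks.  The sparc64 virtual address
 * space has an unusable hole in the middle, so the first pass searches
 * below it (capping the upper limit at VA_EXCLUDE_START); if that pass
 * fails with -ENOMEM and the task's address space extends past the
 * hole, we retry in the region above VA_EXCLUDE_END.  An align_mask of
 * PAGE_MASK & ~HPAGE_MASK keeps the bits between PAGE_SHIFT and
 * HPAGE_SHIFT clear, so vm_unmapped_area() only returns
 * HPAGE_SIZE-aligned addresses.
 */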
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

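/* Top-down search, used by 32-bit tasks whose mmap region grows down
 * from just under the stack.  If no window below mm->mmap_base fits,
 * we retry bottom-up over the full 32-bit range (see the comment
 * inside the function body).
 */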
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

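/* The arch hook behind hugetlbfs mmap(): reject lengths that are not a
 * multiple of HPAGE_SIZE, honor MAP_FIXED requests after validating
 * them, try the caller's hint rounded up to a huge page boundary, and
 * only then fall through to the search matching this mm's layout.  For
 * example (assuming hugetlbfs is mounted at /dev/hugepages), a request
 * like
 *
 *	fd = open("/dev/hugepages/f", O_CREAT | O_RDWR, 0600);
 *	p = mmap(NULL, 4 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED, fd, 0);
 *
 * arrives here with addr == 0 and len == 4 * HPAGE_SIZE.
 */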
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

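/* Walk, and where necessary allocate, the page table levels down to
 * the PTE page covering @addr.  On sparc64 a huge page is not a leaf
 * PMD or PUD entry; it is backed by a full run of ordinary PTEs, so we
 * return the first sub-PTE of the HPAGE_SIZE region (see the alignment
 * comment below).
 */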
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

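/* Lookup-only counterpart of huge_pte_alloc(): return the first
 * sub-PTE of the huge page mapping @addr, or NULL if any intermediate
 * level has not been instantiated yet.
 */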
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

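/* sparc64 does not share hugetlb page tables between processes, so
 * there is never anything to unshare; returning 0 tells the generic
 * code the PMD is still in place.
 */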
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

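/* Install a huge mapping by writing one ordinary PTE per backing base
 * page: 1 << HUGETLB_PAGE_ORDER entries in all, with the physical
 * address in @entry advanced by PAGE_SIZE each step.  huge_pte_count
 * keeps a running count of present huge mappings for the sparc64 MMU
 * context code.
 */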
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

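/* Tear-down counterpart of set_huge_pte_at(): snapshot the first
 * sub-PTE, then clear every PTE in the HPAGE_SIZE range.  The first
 * entry is all the caller needs to recover the page for the whole
 * huge mapping.
 */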
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

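/* Because huge pages are represented as runs of ordinary PTEs rather
 * than leaf PMD or PUD entries, the generic follow_page()/GUP path can
 * walk them like any other mapping.  The stubs below therefore report
 * "not huge" at every level.
 */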
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}