/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define MAX_NUMBER_GPAGES	1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

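/* A hugepd entry packs together a pointer to the hugepte table and the
 * huge page size shift: the low HUGEPD_SHIFT_MASK bits hold the shift,
 * the remaining bits hold the (kernel virtual) table address.  The
 * helpers below decode it. */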
#define hugepd_none(hpd)	((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift)
{
	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}

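/* Walk the page tables for @ea and return a pointer to the (huge) PTE
 * that maps it, or NULL if none is present.  When a hugepd entry is
 * found, *shift is set to the huge page size shift; otherwise it is
 * set to 0. */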
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_map(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

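/* Allocate a zeroed hugepte table for the range covered by *hpdp and
 * install it, unless another thread raced us and installed one first.
 * The table holds one PTE per (1 << pshift)-sized huge page. */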
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift),
				       GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(PGT_CACHE(pdshift - pshift), new);
	else
		hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

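/* Find or create the huge PTE slot for @addr.  Depending on the huge
 * page size, the hugepd entry lives at the PGD, PUD or PMD level. */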
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);
	if (pshift >= PUD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PMD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	       unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}

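/* Huge PMD sharing is not used on this platform, so there is never
 * anything to unshare here. */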
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

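/* Tear down one hugepd entry: clear it and hand the hugepte table it
 * pointed to back to the page table allocator, obeying the same
 * floor/ceiling rules as the generic free_pgd_range() helpers. */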
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	unsigned shift = hugepd_shift(*hpdp);
	unsigned long pdmask = ~((1UL << pdshift) - 1);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}

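/* The hugetlb_free_*_range() walkers below parallel the generic
 * free_pgd_range() family: they hand any hugepd entries they find to
 * free_hugepd_range() and free intermediate tables once a fully
 * covered level has been emptied. */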
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pgd++, addr = next, addr != end);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

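/* On this platform hugepages are reached through hugepd entries rather
 * than through huge PMD/PUD entries, so the generic huge-PMD paths are
 * never taken. */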
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

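/* get_user_pages_fast() helper: take references on all the pages backed
 * by one huge PTE, then re-check the PTE and back out if it changed
 * under us. */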
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

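/* Walk every huge PTE under one hugepd entry for get_user_pages_fast(),
 * stopping as soon as gup_hugepte() fails for any of them. */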
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}

module_init(hugetlbpage_init);

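/* Flush the data and instruction caches of every subpage of a compound
 * hugepage. */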
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++)
		__flush_dcache_icache(page_address(page+i));
}