/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Array of valid huge page sizes - a non-zero value (hugepte_shift) is
 * stored for the huge page sizes that are valid.
 */
static unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */

static inline int shift_to_mmu_psize(unsigned int shift)
{
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case PAGE_SHIFT_64K:
		return MMU_PAGE_64K;
#endif
	case PAGE_SHIFT_16M:
		return MMU_PAGE_16M;
	case PAGE_SHIFT_16G:
		return MMU_PAGE_16G;
	}
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

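/* A hugepd ("huge page directory") entry packs two things into one
 * word: the kernel virtual address of the hugepte table in the high
 * bits, and the huge page shift in the low HUGEPD_SHIFT_MASK bits.
 * __hugepte_alloc() below clears the top address bit when it installs
 * the pointer, so a hugepd tests positive as a signed quantity, while
 * ordinary page-table pointers live at negative (0xc...) kernel
 * addresses; that is how is_hugepd() tells the two apart.
 * hugepd_page() masks the shift back out and restores the
 * linear-mapping high bits. */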
#define hugepd_none(hpd)	((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift)
{
	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}

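/* Walk the page tables for @ea, stopping at whichever level maps it.
 * A hugepd can hang off the PGD, PUD or PMD, so each level is tested
 * with is_hugepd() before descending.  Returns a pointer to the
 * (huge) PTE, or NULL if nothing is mapped; if @shift is non-NULL it
 * is set to the huge page shift, or to 0 for a normal base-page PTE. */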
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_map(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

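/* Allocate a hugepte table with 2^(pdshift - pshift) entries and
 * install it at *hpdp.  The race with a concurrent allocator is
 * resolved under mm->page_table_lock: if somebody else got there
 * first, the freshly allocated table is simply freed again. */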
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift),
				       GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(PGT_CACHE(pdshift - pshift), new);
	else
		hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

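/* Find, or allocate, the hugepd covering @addr for a huge page of
 * size @sz, and return the hugepte slot within it.  The level the
 * hugepd hangs off is chosen so that one directory entry spans at
 * least one huge page.  For example, with a 4K base page size a 16M
 * page (shift 24) is at least PMD_SHIFT but below PUD_SHIFT, so its
 * hugepd replaces what would otherwise be a PMD-table pointer in a
 * PUD entry, and its hugepte table holds 2^(PUD_SHIFT - 24) entries. */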
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);
	if (pshift >= PUD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PMD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	       unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

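/* Free the hugepte table that *hpdp points to and clear the entry.
 * The floor/ceiling checks mirror those in the generic
 * free_pgd_range(): the table is only torn down when the unmapped
 * region spans the directory entry completely, clamped to the
 * [floor, ceiling) limits of the surrounding VMA gap. */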
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	unsigned shift = hugepd_shift(*hpdp);
	unsigned long pdmask = ~((1UL << pdshift) - 1);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (! ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices.
		 */
		pte_update(mm, addr, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

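/* Lockless get_user_pages_fast() helper covering one hugepte.  The
 * PTE is read once, the subpages in [addr, end) are recorded, and
 * references are taken on the head page speculatively with
 * page_cache_add_speculative(); the PTE is then re-read to detect a
 * racing unmap, in which case the references are dropped again. */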
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
				unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* The PTE changed under us: drop the speculative
		 * references again and make the caller fall back to
		 * the slow path. */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr += sz, addr != end);

	return 1;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (!mmu_huge_psizes[mmu_psize])
		return -EINVAL;
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap, unsigned long sz)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (sz / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}

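/* Hash fault handling for huge pages: translate the Linux PTE for
 * @ea into an entry in the hardware hash table, updating the PP bits
 * of an existing HPTE where possible, otherwise inserting a new HPTE
 * into the primary or (failing that) secondary bucket.  _PAGE_BUSY
 * guards the PTE against concurrent updates while this runs. */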
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize)
{
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa, sz;
	long slot;
	int err = 1;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE.  There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE.  The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap, sz);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, shift, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("__hash_page_huge: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}

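/* Register @psize as a valid huge page size, creating its hstate.
 * The value stored in mmu_huge_psizes[] is (pdshift - pageshift),
 * i.e. the number of index bits in the hugepte table for this size;
 * the same value is used as the pgtable cache index when the tables
 * are allocated (see __hugepte_alloc() and hugetlbpage_init()). */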
static void __init set_huge_psize(int psize)
{
	unsigned pdshift;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
	    mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
	     mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
	     mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
		/* Return if huge page size has already been setup or is the
		 * same as the base page size. */
		if (mmu_huge_psizes[psize] ||
		    mmu_psize_defs[psize].shift == PAGE_SHIFT)
			return;
		hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);

		if (mmu_psize_defs[psize].shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (mmu_psize_defs[psize].shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		mmu_huge_psizes[psize] = pdshift - mmu_psize_defs[psize].shift;
	}
}

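/* Parse the "hugepagesz=" kernel command line option, e.g.
 * "hugepagesz=16M" or "hugepagesz=16G".  memparse() accepts the
 * usual K/M/G suffixes; sizes that don't correspond to a supported
 * MMU page size are rejected with a warning. */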
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	mmu_psize = shift_to_mmu_psize(shift);
	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

static int __init hugetlbpage_init(void)
{
	int psize;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	/* Add supported huge page sizes.  Need to change
	 * HUGE_MAX_HSTATE if the number of supported huge page sizes
	 * changes.
	 */
	set_huge_psize(MMU_PAGE_16M);
	set_huge_psize(MMU_PAGE_16G);

	/* Temporarily disable support for 64K huge pages when 64K SPU local
	 * store support is enabled as the current implementation conflicts.
	 */
#ifndef CONFIG_SPU_FS_64K_LS
	set_huge_psize(MMU_PAGE_64K);
#endif

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		if (mmu_huge_psizes[psize]) {
			pgtable_cache_add(mmu_huge_psizes[psize], NULL);
			if (!PGT_CACHE(mmu_huge_psizes[psize]))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n",
				      mmu_psize_to_shift(psize));
		}
	}

	return 0;
}

module_init(hugetlbpage_init);