/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On 64-bit implementations, this is
 * just used to track 16G pages and so is a single array.  32-bit
 * implementations may have more than one gpage size due to limitations
 * of the memory allocators, so we need multiple arrays.
 */
#ifdef CONFIG_PPC64
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#else
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#endif

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#define hugepd_none(hpd)	((hpd).pd == 0)

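/*
 * Walk the page tables for @ea.  A hugepd can replace a normal table
 * entry at any level of the tree, so check for one at each step; when
 * found, return the hugepte within it and report the mapping's page
 * size shift via @shift.  A normal mapping falls through to the pte
 * level and leaves *shift at 0.
 */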
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm))
				return pte_offset_kernel(pm, ea);
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

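/*
 * Allocate a hugepte table to hang off @hpdp.  @pdshift is the shift
 * of the table level the hugepd lives at and @pshift the huge page
 * size shift.  On 64-bit the hugepte table subdivides a pdshift-sized
 * region, so it comes from the matching PGT_CACHE.  On 32-bit the huge
 * page can be bigger than the region a single entry covers, so several
 * consecutive hugepd entries are all pointed at the one hugepte table.
 */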
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC64
	cachep = PGT_CACHE(pdshift - pshift);
#else
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC64
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#else
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

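/*
 * Find or allocate the hugepd entry that maps @addr for a huge page of
 * size @sz, and return the hugepte within it.  The level at which the
 * hugepd sits is chosen by comparing the page size shift against the
 * HUGEPD_*_SHIFT cutoffs above.
 */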
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC32
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}

/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

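/*
 * Example (sizes illustrative): with "hugepagesz=16m hugepages=4
 * hugepagesz=1g hugepages=2" on the command line, each hugepages=
 * count below is recorded against the size given by the hugepagesz=
 * option immediately preceding it.
 */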
static int __init do_gpage_early_setup(char *param, char *val)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* PPC64 */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

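/*
 * PMD sharing (as done on some other architectures) is not implemented
 * here, so there is never anything to unshare.
 */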
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#ifdef CONFIG_PPC32
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

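/*
 * Free a hugepte table.  If no other CPU can be walking this mm's page
 * tables (single user, or the mm has only run on the current CPU), the
 * table is freed immediately; otherwise it is batched on a per-cpu
 * list and freed after an RCU-sched grace period, so that concurrent
 * lockless walkers (e.g. gup_hugepte() below) never see it recycled
 * under them.
 */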
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
#endif

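/*
 * Clear the hugepd entry (or, on 32-bit, the whole run of entries that
 * point at the same hugepte table) and free the table, honouring the
 * same floor/ceiling rules as the generic free_pgd_range().
 */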
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC64
	unsigned int shift = hugepd_shift(*hpdp);
#else
	/* Note: On 32-bit the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;
#ifdef CONFIG_PPC64
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#else
	hugepd_free(tlb, hugepte);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC32
			/*
			 * Increment next by the size of the huge mapping since
			 * on 32-bit there may be more than one entry at the pgd
			 * level for a single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

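/*
 * Lockless get_user_pages() fast path for one hugepte: snapshot the
 * pte, check that it is present (and writable if @write), speculatively
 * take a reference on the compound head, then recheck that the pte did
 * not change underneath us before handing the subpages back.
 */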
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

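/*
 * Return the end of the current hugepte "chunk": either the next
 * sz-aligned boundary past @addr or @end, whichever comes first.
 */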
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif

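/*
 * Report the page size actually backing this VMA: with MM slices the
 * size comes from the slice map, otherwise from the VMA's hstate (or
 * PAGE_SIZE for a non-hugetlb VMA).
 */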
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

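/*
 * True for powers of 4 (powers of 2 with an even log2): used below to
 * validate FSL Book3E huge page sizes, which advance in powers of 4.
 */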
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

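/*
 * 32-bit (FSL) initialization: register every hardware-supported page
 * size other than PAGE_SIZE as a huge page size, and create a single
 * kmem cache for hugepte tables.  The cache is aligned to
 * HUGEPD_SHIFT_MASK + 1 so that the low bits of a hugepd pointer are
 * free to encode the page size shift.
 */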
#ifdef CONFIG_FSL_BOOKE
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this.
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);

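/*
 * Flush each subpage of a huge page from the data cache and invalidate
 * the corresponding instruction cache lines; highmem subpages must be
 * temporarily mapped with kmap_atomic() first.
 */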
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
			__flush_dcache_icache(start);
			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
		}
	}
}