/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this
 * is just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

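/*
 * Convert between a hardware page shift and the MMU_PAGE_* index used
 * throughout the powerpc MMU code; for example, a 16M page (shift 24)
 * maps to MMU_PAGE_16M.  shift_to_mmu_psize() returns -1 for shifts no
 * entry in mmu_psize_defs[] advertises; mmu_psize_to_shift() BUG()s on
 * an index with no configured shift.
 */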
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#define hugepd_none(hpd)	((hpd).pd == 0)

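/*
 * Walk the page tables for @ea, cutting the walk short wherever a hugepd
 * entry is found.  On success, *shift holds the page shift of the mapping
 * (0 for a normal base page) and the returned pointer is the (huge) pte.
 */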
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_kernel(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
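	/*
	 * Purely as an illustration of the arithmetic: if pshift = 24 (16M)
	 * and pdshift = 22, then num_hugepd = 1 << (24 - 22) = 4, and four
	 * consecutive directory slots are filled with the same hugepte table.
	 */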
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

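/*
 * Find, and allocate if necessary, the huge pte for a hugepage mapping of
 * size @sz at @addr.  The hugepd lands at the page table level implied by
 * the page size, per the HUGEPD_*_SHIFT macros above.
 */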
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build a list of the addresses of gigantic pages.  This function is used
 * in early boot, before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}

/*
 * Scan the command line hugepagesz= options for gigantic pages; store those
 * in a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
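
/*
 * For example (values purely illustrative): booting with
 * "hugepagesz=1G hugepages=2 hugepagesz=4G hugepages=1" leaves
 * gpage_npages[] holding 2 for the 1G size and 1 for the 4G size,
 * which reserve_hugetlb_gpages() below then carves out of memblock.
 */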

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/* Build a list of the addresses of gigantic pages.  This function is used
 * in early boot, before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

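/*
 * Free one hugepte table.  If no other CPU can be using the mm, free it
 * immediately; otherwise queue it in a per-cpu batch that is handed to
 * call_rcu_sched() once full, so that lockless page table walkers on
 * other CPUs never see the memory recycled under them.
 */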
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
#endif

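/*
 * Clear the hugepd entry (on FSL, the run of identical entries) at *hpdp
 * and free the hugepte table it points to, subject to the same
 * floor/ceiling checks as the generic free_pgd_range() machinery.
 */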
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

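/*
 * Translate a user address covered by a huge page mapping into its
 * struct page; the mapping's shift picks out the base page within the
 * compound huge page.
 */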
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page, else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

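/*
 * Like pmd_addr_end(), but for an arbitrary power-of-2 huge page size;
 * the "- 1" comparisons keep it correct even when the boundary wraps to
 * zero at the top of the address space.
 */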
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
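
/*
 * is_power_of_4() backs the FSL branch of add_huge_page_size() below:
 * the Freescale Book3E TLB encodes its variable page sizes in powers
 * of 4 (4K, 16K, 64K, 256K, ...), so a huge page size must be a power
 * of 4 as well.
 */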
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if this huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this.
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size.  Currently, we pick 16M or 1M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);

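/*
 * Flush the data cache and invalidate the instruction cache for every
 * base page of a compound huge page, kmapping each base page that lives
 * in highmem.
 */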
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}