/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On 64-bit implementations, this is
 * just used to track 16G pages and so is a single array.  32-bit
 * implementations may have more than one gpage size due to limitations
 * of the memory allocators, so we need multiple arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
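
/*
 * Illustration (not from the original source): on FSL Book3E the array is
 * indexed by MMU psize, so a call such as add_gpage(base, 1ULL << 24, 2)
 * files two 16M gpages under gpage_freearray[shift_to_mmu_psize(24)],
 * while the 64-bit build keeps a single flat list of 16G pages.
 */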

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
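
/*
 * For example, on a platform whose mmu_psize_defs[] gives MMU_PAGE_16M a
 * .shift of 24, shift_to_mmu_psize(24) returns MMU_PAGE_16M and
 * mmu_psize_to_shift(MMU_PAGE_16M) returns 24 (illustrative values; the
 * table contents depend on what the platform setup code detected).
 */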

#define hugepd_none(hpd)	((hpd).pd == 0)

pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm))
				return pte_offset_kernel(pm, ea);
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
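
/*
 * Typical caller pattern (a sketch mirroring follow_huge_addr() below):
 * pass a shift pointer and treat a zero result as "not mapped by a huge
 * page":
 *
 *	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 *	if (!ptep || !shift)
 *		return ERR_PTR(-EINVAL);
 */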

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}
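
/*
 * Worked example (illustrative numbers, not from the original source): on
 * an FSL Book3E build where pdshift is 21 and a 16M page gives pshift == 24,
 * num_hugepd == 1 << (24 - 21) == 8, so eight consecutive page-table slots
 * all end up pointing at the same hugepte.  Clearing PD_HUGE in the pointer
 * and OR-ing in pshift lets hugepd_shift() recover the page size from the
 * entry itself (hence the BUG_ON against HUGEPD_SHIFT_MASK above).
 */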

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
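
/*
 * In other words (see huge_pte_alloc() below): a size whose shift reaches
 * HUGEPD_PGD_SHIFT gets its hugepd installed directly in a pgd slot, one
 * that reaches HUGEPD_PUD_SHIFT lands in a pud slot, and anything smaller
 * goes in at the pmd level.
 */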

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
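
/*
 * Example (illustrative command line, not from the original source): booting
 * with "hugepagesz=1g hugepages=2" makes memparse() return 1 << 30, so the
 * parser records gpage_npages[shift_to_mmu_psize(30)] = 2; a "hugepages="
 * option with no preceding size is skipped because size is still 0.
 */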

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}
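
/*
 * Note the MAX_ORDER check above: a size the buddy allocator can already
 * satisfy (shift < MAX_ORDER + PAGE_SHIFT) is deliberately left to the
 * regular hugetlb allocation path rather than reserved here.
 */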

#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
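
/*
 * Batch-size check (illustrative arithmetic): with 4K pages, 8-byte pte_t
 * entries and a header of roughly a dozen bytes, HUGEPD_FREELIST_SIZE comes
 * to about (4096 - 12) / 8 == 510 hugepte pointers per batch page; the exact
 * figure depends on the configuration.  The fast path above frees directly
 * when no other CPU can be walking this mm's tables; otherwise the frees are
 * deferred to an RCU grace period.
 */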
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
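
/*
 * The floor/ceiling tests above mirror the generic free_pgd_range() logic:
 * they bail out rather than free a directory page that is still partly
 * covered by an adjacent mapping outside [floor, ceiling).
 */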

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't use here the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */
	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
				unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
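
/*
 * Example (illustrative): for sz == 16M, addr == 0x10f0000 and a larger end,
 * __boundary is (0x10f0000 + 0x1000000) & ~0xffffff == 0x2000000.  Comparing
 * the (x - 1) values keeps the result correct even if addr + sz wraps to 0
 * at the top of the address space.
 */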

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
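
/*
 * For example, is_power_of_4(16) is true (__ilog2 == 4, even) while
 * is_power_of_4(8192) is false (__ilog2 == 13, odd).  The Freescale
 * embedded MMUs targeted here encode variable TLB entry sizes in powers
 * of 4, which is why add_huge_page_size() below applies this check on
 * CONFIG_PPC_FSL_BOOK3E.
 */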

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
			__flush_dcache_icache(start);
			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
		}
	}
}
David Gibson0895ecd2009-10-26 19:24:31 +0000917}