/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On 64-bit implementations, this is
 * just used to track 16G pages and so is a single array.  32-bit
 * implementations may have more than one gpage size due to limitations
 * of the memory allocators, so we need multiple arrays.
 */
#ifdef CONFIG_PPC64
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#else
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
        u64 gpage_list[MAX_NUMBER_GPAGES];
        unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#endif

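/*
 * Helpers mapping between a page shift and its index in mmu_psize_defs[].
 * For example, on a configuration whose MMU supports 16M pages,
 * shift_to_mmu_psize(PAGE_SHIFT_16M) returns MMU_PAGE_16M; an
 * unsupported shift returns -1.
 */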
static inline int shift_to_mmu_psize(unsigned int shift)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                if (mmu_psize_defs[psize].shift == shift)
                        return psize;
        return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
        if (mmu_psize_defs[mmu_psize].shift)
                return mmu_psize_defs[mmu_psize].shift;
        BUG();
}

#define hugepd_none(hpd)	((hpd).pd == 0)

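/*
 * Walk the page tables for ea, stopping early if a hugepd entry is
 * found at any level.  For a hugepage mapping, the page shift is
 * returned through *shift; for a normal mapping, *shift is set to 0.
 */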
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (shift)
                *shift = 0;

        pg = pgdir + pgd_index(ea);
        if (is_hugepd(pg)) {
                hpdp = (hugepd_t *)pg;
        } else if (!pgd_none(*pg)) {
                pdshift = PUD_SHIFT;
                pu = pud_offset(pg, ea);
                if (is_hugepd(pu))
                        hpdp = (hugepd_t *)pu;
                else if (!pud_none(*pu)) {
                        pdshift = PMD_SHIFT;
                        pm = pmd_offset(pu, ea);
                        if (is_hugepd(pm))
                                hpdp = (hugepd_t *)pm;
                        else if (!pmd_none(*pm))
                                return pte_offset_kernel(pm, ea);
                }
        }

        if (!hpdp)
                return NULL;

        if (shift)
                *shift = hugepd_shift(*hpdp);
        return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

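/*
 * Allocate a hugepte table and install it in the hugepd entry.  On
 * 32-bit, a single hugepage is covered by several consecutive
 * higher-level entries, so all of them are pointed at the one table.
 */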
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        struct kmem_cache *cachep;
        pte_t *new;

#ifdef CONFIG_PPC64
        cachep = PGT_CACHE(pdshift - pshift);
#else
        int i;
        int num_hugepd = 1 << (pshift - pdshift);
        cachep = hugepte_cache;
#endif

        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC64
        if (!hugepd_none(*hpdp))
                kmem_cache_free(cachep, new);
        else
                hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#else
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                else
                        hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        hpdp->pd = 0;
                kmem_cache_free(cachep, new);
        }
#endif
        spin_unlock(&mm->page_table_lock);
        return 0;
}

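/*
 * Find (or create) the hugepd entry for addr at the page-table level
 * implied by the hugepage size sz, and return the hugepte slot inside
 * the table it points to.
 */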
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);

        pg = pgd_offset(mm, addr);
        if (pshift >= PUD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= PMD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }

        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC32
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
        int i;

        if (addr == 0)
                return;

        gpage_freearray[idx].nr_gpages = number_of_pages;

        for (i = 0; i < number_of_pages; i++) {
                gpage_freearray[idx].gpage_list[i] = addr;
                addr += page_size;
        }
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
        int nr_gpages = gpage_freearray[idx].nr_gpages;

        if (nr_gpages == 0)
                return 0;

#ifdef CONFIG_HIGHMEM
        /*
         * If gpages can be in highmem we can't use the trick of storing the
         * data structure in the page; allocate space for this.
         */
        m = alloc_bootmem(sizeof(struct huge_bootmem_page));
        m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
        m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

        list_add(&m->list, &huge_boot_pages);
        gpage_freearray[idx].nr_gpages = nr_gpages;
        gpage_freearray[idx].gpage_list[nr_gpages] = 0;
        m->hstate = hstate;

        return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
        static phys_addr_t size;
        unsigned long npages;

        /*
         * The hugepagesz and hugepages cmdline options are interleaved.  We
         * use the size variable to keep track of whether or not this was done
         * properly and skip over instances where it is incorrect.  Other
         * command-line parsing code will issue warnings, so we don't need to.
         */
        if ((strcmp(param, "default_hugepagesz") == 0) ||
            (strcmp(param, "hugepagesz") == 0)) {
                size = memparse(val, NULL);
        } else if (strcmp(param, "hugepages") == 0) {
                if (size != 0) {
                        if (sscanf(val, "%lu", &npages) <= 0)
                                npages = 0;
                        gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
                        size = 0;
                }
        }
        return 0;
}

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
        static __initdata char cmdline[COMMAND_LINE_SIZE];
        phys_addr_t size, base;
        int i;

        strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

        /*
         * Walk gpage list in reverse, allocating larger page sizes first.
         * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
         * When we reach the point in the list where pages are no longer
         * considered gpages, we're done.
         */
        for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
                if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
                        continue;
                else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
                        break;

                size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
                base = memblock_alloc_base(size * gpage_npages[i], size,
                                           MEMBLOCK_ALLOC_ANYWHERE);
                add_gpage(base, size, gpage_npages[i]);
        }
}

#else /* PPC64 */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif

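/* PMD sharing of hugetlb mappings is not implemented on powerpc;
 * returning 0 tells the generic hugetlb code that nothing was unshared.
 */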
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

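/*
 * On 32-bit, a hugepte table may still be referenced by a concurrent
 * lockless walker (the DTLB pgtable walk or gup_hugepd()), so rather
 * than freeing it immediately we batch tables per-CPU and free each
 * batch from an RCU-sched callback.
 */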
#ifdef CONFIG_PPC32
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &__get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
}
#endif

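/*
 * Clear the hugepd entry (or, on 32-bit, the several entries aliasing
 * it) and free the hugepte table it points to, unless the floor and
 * ceiling limits show that part of the covered range must survive.
 */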
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC64
        unsigned int shift = hugepd_shift(*hpdp);
#else
        /* Note: On 32-bit the hpdp may be the first of several */
        num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#endif

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                hpdp->pd = 0;

        tlb->need_flush = 1;
#ifdef CONFIG_PPC64
        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#else
        hugepd_free(tlb, hugepte);
#endif
}

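/*
 * The following walkers mirror the generic free_pmd_range() and
 * free_pud_range(), except that within a hugepage region a present
 * entry is a hugepd, which is handed to free_hugepd_range() rather
 * than descended into.
 */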
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!is_hugepd(pud)) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't use the optimization,
         * used in the normal page free_pgd_range(), of checking
         * whether we're actually covering a large enough range to
         * have to do anything at the top level of the walk instead
         * of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(pgd)) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
#ifdef CONFIG_PPC32
                        /*
                         * Increment next by the size of the huge mapping since
                         * on 32-bit there may be more than one entry at the pgd
                         * level for a single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;
        unsigned shift;
        unsigned long mask;

        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

        /* Verify it is a huge page else bail. */
        if (!ptep || !shift)
                return ERR_PTR(-EINVAL);

        mask = (1UL << shift) - 1;
        page = pte_page(*ptep);
        if (page)
                page += (address & mask) / PAGE_SIZE;

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}

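/*
 * Lockless get_user_pages_fast() leg for a hugepte: take a speculative
 * reference on the head page covering all the subpages in range, then
 * recheck that the pte did not change underneath us.
 */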
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                       unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page, *tail;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = *ptep;
        mask = _PAGE_PRESENT | _PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        if ((pte_val(pte) & mask) != mask)
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        tail = page;
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        /*
         * Any tail pages need their mapcount reference taken before we
         * return.
         */
        while (refs--) {
                if (PageTail(tail))
                        get_huge_page_tail(tail);
                tail++;
        }

        return 1;
}

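/*
 * Like pmd_addr_end() but for an arbitrary hugepage size; the
 * comparison on (boundary - 1) keeps it safe when addr + sz wraps
 * around the top of the address space.
 */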
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
               unsigned long addr, unsigned long end,
               int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(*hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
#ifdef CONFIG_PPC_MM_SLICES
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
#else
        return get_unmapped_area(file, addr, len, pgoff, flags);
#endif
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

        return 1UL << mmu_psize_to_shift(psize);
#else
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        return huge_page_size(hstate_vma(vma));
#endif
}

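/*
 * The Freescale Book3E TLB accepts only power-of-4 page sizes, so a
 * valid size is a power of 2 whose log2 is even (e.g. 4K, 16K, 64K,
 * ..., 4M).
 */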
static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
        if ((size < PAGE_SIZE) || !is_power_of_4(size))
                return -EINVAL;
#else
        if (!is_power_of_2(size)
            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
        /* Disable support for 64K huge pages when 64K SPU local store
         * support is enabled as the current implementation conflicts.
         */
        if (shift == PAGE_SHIFT_64K)
                return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been set up */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0)
                printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

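/*
 * Two hugetlbpage_init() flavours: Freescale BookE keeps hugeptes in a
 * dedicated kmem cache aligned so the low bits of a hugepd can encode
 * the page size, while the 64-bit hash variant reuses the generic
 * pgtable caches, sized by (pdshift - shift).
 */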
#ifdef CONFIG_FSL_BOOKE
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                /* Don't treat normal page sizes as huge... */
                if (shift != PAGE_SHIFT)
                        if (add_huge_page_size(1ULL << shift) < 0)
                                continue;
        }

        /*
         * Create a kmem cache for hugeptes.  The bottom bits in the pte have
         * size information encoded in them, so align them to allow this.
         */
        hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
                                          HUGEPD_SHIFT_MASK + 1, 0, NULL);
        if (hugepte_cache == NULL)
                panic("%s: Unable to create kmem cache for hugeptes\n",
                      __func__);

        /* Default hpage size = 4M */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else
                panic("%s: Unable to set default huge page size\n", __func__);

        return 0;
}
#else
static int __init hugetlbpage_init(void)
{
        int psize;

        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < PMD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PUD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;

                pgtable_cache_add(pdshift - shift, NULL);
                if (!PGT_CACHE(pdshift - shift))
                        panic("hugetlbpage_init(): could not create "
                              "pgtable cache for %d bit pagesize\n", shift);
        }

        /* Set default large page size.  Currently, we pick 16M or 1M
         * depending on what is available.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

        return 0;
}
#endif
module_init(hugetlbpage_init);

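/*
 * A hugepage is a compound page, so flush the caches for each
 * constituent PAGE_SIZE page, kmapping first when it lives in highmem.
 */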
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
                }
        }
}