/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Array of valid huge page sizes - a non-zero value (the hugepte shift)
 * is stored for each huge page size that is valid.
 */
unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */

#define hugepte_shift			mmu_huge_psizes
#define PTRS_PER_HUGEPTE(psize)		(1 << hugepte_shift[psize])
#define HUGEPTE_TABLE_SIZE(psize)	(sizeof(pte_t) << hugepte_shift[psize])

#define HUGEPD_SHIFT(psize)		(mmu_psize_to_shift(psize) \
					 + hugepte_shift[psize])
#define HUGEPD_SIZE(psize)		(1UL << HUGEPD_SHIFT(psize))
#define HUGEPD_MASK(psize)		(~(HUGEPD_SIZE(psize)-1))
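
/*
 * Geometry note: set_huge_psize() below always stores
 * <level shift> - <page shift> in hugepte_shift[], so HUGEPD_SHIFT()
 * collapses to the shift of the page table level the hugepd replaces.
 * As a worked example (illustrative), 16M pages on a 4K base kernel
 * put the hugepte table at the PUD level:
 *
 *	hugepte_shift[MMU_PAGE_16M] = PUD_SHIFT - 24
 *	HUGEPD_SHIFT(MMU_PAGE_16M)  = 24 + (PUD_SHIFT - 24) = PUD_SHIFT
 *
 * i.e. one hugepd spans exactly one PUD entry's worth of address space.
 */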

/* Subtract one from array size because we don't need a cache for 4K since
 * it is not a huge page size */
#define HUGE_PGTABLE_INDEX(psize)	(HUGEPTE_CACHE_NUM + psize - 1)
#define HUGEPTE_CACHE_NAME(psize)	(huge_pgtable_cache_name[psize])

static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
	[MMU_PAGE_64K]	= "hugepte_cache_64K",
	[MMU_PAGE_1M]	= "hugepte_cache_1M",
	[MMU_PAGE_16M]	= "hugepte_cache_16M",
	[MMU_PAGE_16G]	= "hugepte_cache_16G",
};

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)

static inline int shift_to_mmu_psize(unsigned int shift)
{
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case PAGE_SHIFT_64K:
		return MMU_PAGE_64K;
#endif
	case PAGE_SHIFT_16M:
		return MMU_PAGE_16M;
	case PAGE_SHIFT_16G:
		return MMU_PAGE_16G;
	}
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

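/*
 * Find the pte for @addr within the hugepte table that *hpdp points
 * to.  As a worked example (illustrative): for a 16M hstate, shift is
 * 24, so idx is just (addr >> 24) masked to the table size, and the
 * low 24 bits of addr are the offset within the huge page itself.
 */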
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    struct hstate *hstate)
{
	unsigned int shift = huge_page_shift(hstate);
	int psize = shift_to_mmu_psize(shift);
	unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}

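/*
 * Allocate a fresh hugepte table and install it in *hpdp.  Note the
 * check-under-lock pattern below: the allocation happens outside
 * mm->page_table_lock, and if another thread installed a table first,
 * the loser of the race simply frees its unused copy.
 */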
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int psize)
{
	pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
				       GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PUD_SHIFT)
		return pud_offset(pgd, addr);
	else
		return (pud_t *) pgd;
}

static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
			 struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PUD_SHIFT)
		return pud_alloc(mm, pgd, addr);
	else
		return (pud_t *) pgd;
}

static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PMD_SHIFT)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}

static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
			 struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PMD_SHIFT)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
		unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}
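
/*
 * Usage sketch (illustrative - the caller lives outside this file):
 * the early device-tree scan, on finding memory reserved for gigantic
 * pages, could record it with
 *
 *	add_gpage(phys_addr, 1UL << PAGE_SHIFT_16G, nr_blocks);
 *
 * alloc_bootmem_huge_page() below then hands these out one at a time.
 */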

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	unsigned int psize;
	unsigned int shift;
	unsigned long sz;
	struct hstate *hstate;
	psize = get_slice_psize(mm, addr);
	shift = mmu_psize_to_shift(psize);
	sz = ((1UL) << shift);
	hstate = size_to_hstate(sz);

	addr &= hstate->mask;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = hpud_offset(pg, addr, hstate);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr, hstate);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr,
						      hstate);
		}
	}

	return NULL;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	struct hstate *hstate;
	unsigned int psize;
	hstate = size_to_hstate(sz);

	psize = get_slice_psize(mm, addr);
	BUG_ON(!mmu_huge_psizes[psize]);

	addr &= hstate->mask;

	pg = pgd_offset(mm, addr);
	pu = hpud_alloc(mm, pg, addr, hstate);

	if (pu) {
		pm = hpmd_alloc(mm, pu, addr, hstate);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
		return NULL;

	return hugepte_offset(hpdp, addr, hstate);
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
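
/* PMD sharing of huge page mappings is not implemented on powerpc, so
 * huge_pmd_unshare() above never has anything to unshare. */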

static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
			       unsigned int psize)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
						 HUGEPTE_CACHE_NUM+psize-1,
						 PGF_CACHENUM_MASK));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling,
				   unsigned int psize)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;
	unsigned int shift;
	unsigned int psize = get_slice_psize(tlb->mm, addr);
	shift = mmu_psize_to_shift(psize);

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (shift < PMD_SHIFT) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling, psize);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud, psize);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * Comments below are taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
	unsigned int psize = get_slice_psize(tlb->mm, addr);

	addr &= HUGEPD_MASK(psize);
	if (addr < floor) {
		addr += HUGEPD_SIZE(psize);
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK(psize);
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE(psize);
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		psize = get_slice_psize(tlb->mm, addr);
		BUG_ON(!mmu_huge_psizes[psize]);
		next = pgd_addr_end(addr, end);
		if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			if (pgd_none(*pgd))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
		}
	} while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices.
		 */
		unsigned int psize = get_slice_psize(mm, addr);
		unsigned int shift = mmu_psize_to_shift(psize);
		unsigned long sz = ((1UL) << shift);
		struct hstate *hstate = size_to_hstate(sz);
		pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
456
457pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
458 pte_t *ptep)
459{
Benjamin Herrenschmidta741e672007-04-10 17:09:37 +1000460 unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
David Gibsone28f7fa2005-08-05 19:39:06 +1000461 return __pte(old);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462}
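
/* Note: pte_update() with a ~0UL clear mask zeroes every pte bit and,
 * through hpte_need_flush(), arranges for any stale hash table entry
 * to be invalidated; the final argument flags this as a huge page. */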

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned int mmu_psize = get_slice_psize(mm, address);

	/* Verify it is a huge page else bail. */
	if (!mmu_huge_psizes[mmu_psize])
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page) {
		unsigned int shift = mmu_psize_to_shift(mmu_psize);
		unsigned long sz = ((1UL) << shift);
		page += (address % sz) / PAGE_SIZE;
	}

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

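/*
 * On ppc64, a huge page is never represented by a leaf entry at the
 * pmd or pud level - only by a hugepd pointing to a separate hugepte
 * table - so pmd_huge()/pud_huge() report 0 and the generic
 * follow_huge_pmd() path must never be reached (hence the BUG() below).
 */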
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (!mmu_huge_psizes[mmu_psize])
		return -EINVAL;
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap,
						  unsigned long sz)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (sz / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}
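
/* (0x400 above is the instruction storage interrupt vector: only an
 * instruction fetch forces the dcache/icache writeback; for a data
 * access the page is instead mapped no-execute via HPTE_R_N.) */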

int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa, sz;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);
	unsigned int mmu_psize;
	int shift;
	mmu_psize = get_slice_psize(mm, ea);

	if (!mmu_huge_psizes[mmu_psize])
		goto out;
	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE.  There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE.  The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	shift = mmu_psize_to_shift(mmu_psize);
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap, sz);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, shift, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}

static void __init set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
	    mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
	     mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
	     mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
		/* Return if huge page size has already been set up or is the
		 * same as the base page size. */
		if (mmu_huge_psizes[psize] ||
		    mmu_psize_defs[psize].shift == PAGE_SHIFT)
			return;
		if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL))
			return;
		hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);

		switch (mmu_psize_defs[psize].shift) {
		case PAGE_SHIFT_64K:
			/* We only allow 64k hpages with 4k base page,
			 * which was checked above, and always put them
			 * at the PMD */
			hugepte_shift[psize] = PMD_SHIFT;
			break;
		case PAGE_SHIFT_16M:
			/* 16M pages can be at two different levels
			 * of page tables based on base page size */
			if (PAGE_SHIFT == PAGE_SHIFT_64K)
				hugepte_shift[psize] = PMD_SHIFT;
			else /* 4k base page */
				hugepte_shift[psize] = PUD_SHIFT;
			break;
		case PAGE_SHIFT_16G:
			/* 16G pages are always at PGD level */
			hugepte_shift[psize] = PGDIR_SHIFT;
			break;
		}
		hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
	} else
		hugepte_shift[psize] = 0;
}
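
/*
 * To summarise the placement logic above: 64K huge pages hang their
 * hugepte table off the PMD (4K base page only); 16M huge pages use
 * the PMD with a 64K base page and the PUD with a 4K base page; 16G
 * huge pages always hang off the PGD.
 */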

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	mmu_psize = shift_to_mmu_psize(shift);
	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

static int __init hugetlbpage_init(void)
{
	unsigned int psize;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	/* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
	 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
	 * sizes changes.
	 */
	set_huge_psize(MMU_PAGE_16M);
	set_huge_psize(MMU_PAGE_16G);

	/* Temporarily disable support for 64K huge pages when 64K SPU local
	 * store support is enabled as the current implementation conflicts.
	 */
#ifndef CONFIG_SPU_FS_64K_LS
	set_huge_psize(MMU_PAGE_64K);
#endif

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		if (mmu_huge_psizes[psize]) {
			pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
				kmem_cache_create(
						HUGEPTE_CACHE_NAME(psize),
						HUGEPTE_TABLE_SIZE(psize),
						HUGEPTE_TABLE_SIZE(psize),
						0,
						NULL);
			if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
				panic("hugetlbpage_init(): could not create %s\n",
				      HUGEPTE_CACHE_NAME(psize));
		}
	}

	return 0;
}

module_init(hugetlbpage_init);