/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

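/*
 * The user address space is carved up into hugepage "areas": 256MB
 * segments (SID_SHIFT = 28) below 4GB, and larger
 * (1UL << HTLB_AREA_SHIFT)-sized areas above it.  Each area can be
 * opened for hugepage use independently; which areas are open is
 * tracked by the low/high bitmaps in the mm context.
 */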
#define NUM_LOW_AREAS   (0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS  (PGTABLE_RANGE >> HTLB_AREA_SHIFT)

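/*
 * At the bottom of the page-table hierarchy a normal pud entry (pmd
 * entry with 64K base pages) is replaced by a "hugepd" pointing to a
 * single table of PTRS_PER_HUGEPTE hugeptes.  Each hugepte maps one
 * HPAGE_SIZE page, so one hugepd covers HUGEPD_SIZE bytes of address
 * space.
 */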
#ifdef CONFIG_PPC_64K_PAGES
#define HUGEPTE_INDEX_SIZE      (PMD_SHIFT-HPAGE_SHIFT)
#else
#define HUGEPTE_INDEX_SIZE      (PUD_SHIFT-HPAGE_SHIFT)
#endif
#define PTRS_PER_HUGEPTE        (1 << HUGEPTE_INDEX_SIZE)
#define HUGEPTE_TABLE_SIZE      (sizeof(pte_t) << HUGEPTE_INDEX_SIZE)

#define HUGEPD_SHIFT            (HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
#define HUGEPD_SIZE             (1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK             (~(HUGEPD_SIZE-1))

#define huge_pgtable_cache      (pgtable_cache[HUGEPTE_CACHE_NUM])

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK       0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)        ((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!(hpd.pd & HUGEPD_OK));
        return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
        unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
        pte_t *dir = hugepd_page(*hpdp);

        return dir + idx;
}

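/*
 * Populate an empty hugepd with a freshly allocated hugepte table.
 * The allocation happens without the page table lock; if we lose the
 * race and somebody else has installed a table in the meantime, just
 * free ours again.
 */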
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address)
{
        pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
                                      GFP_KERNEL|__GFP_REPEAT);

        if (! new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (!hugepd_none(*hpdp))
                kmem_cache_free(huge_pgtable_cache, new);
        else
                hpdp->pd = (unsigned long)new | HUGEPD_OK;
        spin_unlock(&mm->page_table_lock);
        return 0;
}

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pg;
        pud_t *pu;

        BUG_ON(! in_hugepage_area(mm->context, addr));

        addr &= HPAGE_MASK;

        pg = pgd_offset(mm, addr);
        if (!pgd_none(*pg)) {
                pu = pud_offset(pg, addr);
                if (!pud_none(*pu)) {
#ifdef CONFIG_PPC_64K_PAGES
                        pmd_t *pm;
                        pm = pmd_offset(pu, addr);
                        if (!pmd_none(*pm))
                                return hugepte_offset((hugepd_t *)pm, addr);
#else
                        return hugepte_offset((hugepd_t *)pu, addr);
#endif
                }
        }

        return NULL;
}

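/*
 * Walk down to the level that holds the hugepd for @addr, allocating
 * intermediate levels on demand, make sure the hugepd has a hugepte
 * table, and return the address of the hugepte for @addr within it.
 */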
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pg;
        pud_t *pu;
        hugepd_t *hpdp = NULL;

        BUG_ON(! in_hugepage_area(mm->context, addr));

        addr &= HPAGE_MASK;

        pg = pgd_offset(mm, addr);
        pu = pud_alloc(mm, pg, addr);

        if (pu) {
#ifdef CONFIG_PPC_64K_PAGES
                pmd_t *pm;
                pm = pmd_alloc(mm, pu, addr);
                if (pm)
                        hpdp = (hugepd_t *)pm;
#else
                hpdp = (hugepd_t *)pu;
#endif
        }

        if (! hpdp)
                return NULL;

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
                return NULL;

        return hugepte_offset(hpdp, addr);
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

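/*
 * Unhook a hugepte table from its hugepd.  The table itself is handed
 * to the mmu_gather so that, as for ordinary page tables, its freeing
 * is deferred until after the TLB flush.
 */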
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
        pte_t *hugepte = hugepd_page(*hpdp);

        hpdp->pd = 0;
        tlb->need_flush = 1;
        pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
                                                 PGF_CACHENUM_MASK));
}

#ifdef CONFIG_PPC_64K_PAGES
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                free_hugepte_range(tlb, (hugepd_t *)pmd);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd);
}
#endif

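/*
 * Free the page-table levels above the hugepte tables.  This mirrors
 * the generic free_pgd_range() walk, except that the level which
 * holds hugepds is torn down with free_hugepte_range() instead of
 * recursing further.
 */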
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
                if (pud_none_or_clear_bad(pud))
                        continue;
                hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
                if (pud_none(*pud))
                        continue;
                free_hugepte_range(tlb, (hugepd_t *)pud);
#endif
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long start;

        /*
         * Comments below taken from the normal free_pgd_range().  They
         * apply here too.  The tests against HUGEPD_MASK below are
         * essential, because we *don't* test for this at the bottom
         * level.  Without them we'll attempt to free a hugepte table
         * when we unmap just part of it, even if there are other
         * active mappings using it.
         *
         * The next few lines have given us lots of grief...
         *
         * Why are we testing HUGEPD* at this top level?  Because
         * often there will be no work to do at all, and we'd prefer
         * not to go all the way down to the bottom just to discover
         * that.
         *
         * Why all these "- 1"s?  Because 0 represents both the bottom
         * of the address space and the top of it (using -1 for the
         * top wouldn't help much: the masks would do the wrong thing).
         * The rule is that addr 0 and floor 0 refer to the bottom of
         * the address space, but end 0 and ceiling 0 refer to the top.
         * Comparisons need to use "end - 1" and "ceiling - 1" (though
         * that end 0 case should be mythical).
         *
         * Wherever addr is brought up or ceiling brought down, we
         * must be careful to reject "the opposite 0" before it
         * confuses the subsequent tests.  But what about where end is
         * brought down by HUGEPD_SIZE below?  No, end can't go down
         * to 0 there.
         *
         * Whereas we round start (addr) and ceiling down, by different
         * masks at different levels, in order to test whether a table
         * now has no other vmas using it, so can be freed, we don't
         * bother to round floor or end up - the tests don't need that.
         */

        addr &= HUGEPD_MASK;
        if (addr < floor) {
                addr += HUGEPD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= HUGEPD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= HUGEPD_SIZE;
        if (addr > end - 1)
                return;

        start = addr;
        pgd = pgd_offset((*tlb)->mm, addr);
        do {
                BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        if (pte_present(*ptep)) {
                /* We open-code pte_clear because we need to pass the
                 * right argument to hpte_need_flush (huge / !huge).
                 * Might not be necessary anymore if we make
                 * hpte_need_flush() get the page size from the slices.
                 */
                pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
        }
        *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
        return __pte(old);
}

struct slb_flush_info {
        struct mm_struct *mm;
        u16 newareas;
};

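/*
 * Invalidate this CPU's SLB entries for the newly opened low
 * segments.  Run on every CPU via on_each_cpu(); CPUs not currently
 * running the affected mm have nothing to do.
 */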
static void flush_low_segments(void *parm)
{
        struct slb_flush_info *fi = parm;
        unsigned long i;

        BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

        /* Only need to do anything if this CPU is working in the same
         * mm as the one which has changed */
        if (current->active_mm != fi->mm)
                return;

        /* update the paca copy of the context struct */
        get_paca()->context = current->active_mm->context;

        asm volatile("isync" : : : "memory");
        for (i = 0; i < NUM_LOW_AREAS; i++) {
                if (! (fi->newareas & (1U << i)))
                        continue;
                asm volatile("slbie %0"
                             : : "r" ((i << SID_SHIFT) | SLBIE_C));
        }
        asm volatile("isync" : : : "memory");
}

static void flush_high_segments(void *parm)
{
        struct slb_flush_info *fi = parm;
        unsigned long i, j;

        BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

        /* Only need to do anything if this CPU is working in the same
         * mm as the one which has changed */
        if (current->active_mm != fi->mm)
                return;

        /* update the paca copy of the context struct */
        get_paca()->context = current->active_mm->context;

        asm volatile("isync" : : : "memory");
        for (i = 0; i < NUM_HIGH_AREAS; i++) {
                if (! (fi->newareas & (1U << i)))
                        continue;
                for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
                        asm volatile("slbie %0"
                                     :: "r" (((i << HTLB_AREA_SHIFT)
                                              + (j << SID_SHIFT)) | SLBIE_C));
        }
        asm volatile("isync" : : : "memory");
}

static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
        unsigned long start = area << SID_SHIFT;
        unsigned long end = (area+1) << SID_SHIFT;
        struct vm_area_struct *vma;

        BUG_ON(area >= NUM_LOW_AREAS);

        /* Check no VMAs are in the region */
        vma = find_vma(mm, start);
        if (vma && (vma->vm_start < end))
                return -EBUSY;

        return 0;
}

static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
        unsigned long start = area << HTLB_AREA_SHIFT;
        unsigned long end = (area+1) << HTLB_AREA_SHIFT;
        struct vm_area_struct *vma;

        BUG_ON(area >= NUM_HIGH_AREAS);

        /* Hack: so that each address is controlled by exactly one of
         * the high or low area bitmaps, the first high area starts at
         * 4GB, not 0 */
        if (start == 0)
                start = 0x100000000UL;

        /* Check no VMAs are in the region */
        vma = find_vma(mm, start);
        if (vma && (vma->vm_start < end))
                return -EBUSY;

        return 0;
}

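/*
 * Turn the requested areas over to hugepages: fail with -EBUSY if any
 * of them already contains a mapping, otherwise update the context
 * bitmap and flush stale SLB entries on all CPUs running this mm.
 */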
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
        unsigned long i;
        struct slb_flush_info fi;

        BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
        BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

        newareas &= ~(mm->context.low_htlb_areas);
        if (! newareas)
                return 0; /* The segments we want are already open */

        for (i = 0; i < NUM_LOW_AREAS; i++)
                if ((1 << i) & newareas)
                        if (prepare_low_area_for_htlb(mm, i) != 0)
                                return -EBUSY;

        mm->context.low_htlb_areas |= newareas;

        /* the context change must make it to memory before the flush,
         * so that further SLB misses do the right thing. */
        mb();

        fi.mm = mm;
        fi.newareas = newareas;
        on_each_cpu(flush_low_segments, &fi, 0, 1);

        return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
        struct slb_flush_info fi;
        unsigned long i;

        BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
        BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
                     != NUM_HIGH_AREAS);

        newareas &= ~(mm->context.high_htlb_areas);
        if (! newareas)
                return 0; /* The areas we want are already open */

        for (i = 0; i < NUM_HIGH_AREAS; i++)
                if ((1 << i) & newareas)
                        if (prepare_high_area_for_htlb(mm, i) != 0)
                                return -EBUSY;

        mm->context.high_htlb_areas |= newareas;

        /* the context change must make it to memory before the flush,
         * so that further SLB misses do the right thing. */
        mb();

        fi.mm = mm;
        fi.newareas = newareas;
        on_each_cpu(flush_high_segments, &fi, 0, 1);

        return 0;
}

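/*
 * Called at hugepage mmap time: check the alignment of the request
 * and open whichever low and/or high areas are needed to cover
 * [addr, addr+len).
 */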
int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
        int err = 0;

        if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
                return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;

        if (addr < 0x100000000UL)
                err = open_low_hpage_areas(current->mm,
                                           LOW_ESID_MASK(addr, len));
        if ((addr + len) > 0x100000000UL)
                err = open_high_hpage_areas(current->mm,
                                            HTLB_AREA_MASK(addr, len));
#ifdef CONFIG_SPE_BASE
        spu_flush_all_slbs(current->mm);
#endif
        if (err) {
                printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
                       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
                       addr, len,
                       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
                return err;
        }

        return 0;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;

        if (! in_hugepage_area(mm->context, address))
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, address);
        page = pte_page(*ptep);
        if (page)
                page += (address % HPAGE_SIZE) / PAGE_SIZE;

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > TASK_SIZE)
                return -ENOMEM;

        /* handle fixed mapping: prevent overlap with huge pages */
        if (flags & MAP_FIXED) {
                if (is_hugepage_only_range(mm, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (((TASK_SIZE - len) >= addr)
                    && (!vma || (addr+len) <= vma->vm_start)
                    && !is_hugepage_only_range(mm, addr, len))
                        return addr;
        }
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        vma = find_vma(mm, addr);
        while (TASK_SIZE - len >= addr) {
                BUG_ON(vma && (addr >= vma->vm_end));

                if (touches_hugepage_low_range(mm, addr, len)) {
                        addr = ALIGN(addr+1, 1<<SID_SHIFT);
                        vma = find_vma(mm, addr);
                        continue;
                }
                if (touches_hugepage_high_range(mm, addr, len)) {
                        addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
                        vma = find_vma(mm, addr);
                        continue;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                vma = vma->vm_next;
        }

        /* Make sure we didn't miss any holes */
        if (start_addr != TASK_UNMAPPED_BASE) {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
                goto full_search;
        }
        return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* handle fixed mapping: prevent overlap with huge pages */
        if (flags & MAP_FIXED) {
                if (is_hugepage_only_range(mm, addr, len))
                        return -EINVAL;
                return addr;
        }

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start)
                    && !is_hugepage_only_range(mm, addr, len))
                        return addr;
        }

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & PAGE_MASK;
        do {
hugepage_recheck:
                if (touches_hugepage_low_range(mm, addr, len)) {
                        addr = (addr & ((~0) << SID_SHIFT)) - len;
                        goto hugepage_recheck;
                } else if (touches_hugepage_high_range(mm, addr, len)) {
                        addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
                        goto hugepage_recheck;
                }

                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr+len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || ((addr + len) <= vma->vm_start)))
                return 0;

        return -ENOMEM;
}

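/*
 * Scan the low 4GB for a free range of @len bytes lying entirely
 * within segments permitted by @segmask, advancing segment by
 * segment past anything unsuitable.  htlb_get_high_area() below is
 * the equivalent for the above-4GB areas.
 */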
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
        unsigned long addr = 0;
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        while (addr + len <= 0x100000000UL) {
                BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

                if (! __within_hugepage_low_range(addr, len, segmask)) {
                        addr = ALIGN(addr+1, 1<<SID_SHIFT);
                        vma = find_vma(current->mm, addr);
                        continue;
                }

                if (!vma || (addr + len) <= vma->vm_start)
                        return addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
                /* Depending on segmask this might not be a confirmed
                 * hugepage region, so the ALIGN could have skipped
                 * some VMAs */
                vma = find_vma(current->mm, addr);
        }

        return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
        unsigned long addr = 0x100000000UL;
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, addr);
        while (addr + len <= TASK_SIZE_USER64) {
                BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

                if (! __within_hugepage_high_range(addr, len, areamask)) {
                        addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
                        vma = find_vma(current->mm, addr);
                        continue;
                }

                if (!vma || (addr + len) <= vma->vm_start)
                        return addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
                /* Depending on areamask this might not be a confirmed
                 * hugepage region, so the ALIGN could have skipped
                 * some VMAs */
                vma = find_vma(current->mm, addr);
        }

        return -ENOMEM;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        int lastshift;
        u16 areamask, curareas;

        if (HPAGE_SHIFT == 0)
                return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -EINVAL;

        /* Paranoia, caller should have dealt with this */
        BUG_ON((addr + len) < addr);

        /* Handle MAP_FIXED */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(addr, len, pgoff))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT)) {
                curareas = current->mm->context.low_htlb_areas;

                /* First see if we can use the hint address */
                if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
                        areamask = LOW_ESID_MASK(addr, len);
                        if (open_low_hpage_areas(current->mm, areamask) == 0)
                                return addr;
                }

                /* Next see if we can map in the existing low areas */
                addr = htlb_get_low_area(len, curareas);
                if (addr != -ENOMEM)
                        return addr;

                /* Finally go looking for areas to open */
                lastshift = 0;
                for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
                     ! lastshift; areamask >>=1) {
                        if (areamask & 1)
                                lastshift = 1;

                        addr = htlb_get_low_area(len, curareas | areamask);
                        if ((addr != -ENOMEM)
                            && open_low_hpage_areas(current->mm, areamask) == 0)
                                return addr;
                }
        } else {
                curareas = current->mm->context.high_htlb_areas;

                /* First see if we can use the hint address */
                /* We discourage 64-bit processes from doing hugepage
                 * mappings below 4GB (must use MAP_FIXED) */
                if ((addr >= 0x100000000UL)
                    && (htlb_check_hinted_area(addr, len) == 0)) {
                        areamask = HTLB_AREA_MASK(addr, len);
                        if (open_high_hpage_areas(current->mm, areamask) == 0)
                                return addr;
                }

                /* Next see if we can map in the existing high areas */
                addr = htlb_get_high_area(len, curareas);
                if (addr != -ENOMEM)
                        return addr;

                /* Finally go looking for areas to open */
                lastshift = 0;
                for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
                     ! lastshift; areamask >>=1) {
                        if (areamask & 1)
                                lastshift = 1;

                        addr = htlb_get_high_area(len, curareas | areamask);
                        if ((addr != -ENOMEM)
                            && open_high_hpage_areas(current->mm, areamask) == 0)
                                return addr;
                }
        }
        printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
               " enough areas\n");
        return -ENOMEM;
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
                                                  pte_t pte, int trap)
{
        struct page *page;
        int i;

        if (!pfn_valid(pte_pfn(pte)))
                return rflags;

        page = pte_page(pte);

        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
                        for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
                                __flush_dcache_icache(page_address(page+i));
                        set_bit(PG_arch_1, &page->flags);
                } else {
                        rflags |= HPTE_R_N;
                }
        }
        return rflags;
}

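/*
 * Called from the low-level hash miss path: build or update the
 * hardware HPTE backing a hugepage pte.  Returns 0 on success, or 1
 * to pass the fault up to do_page_fault().
 */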
int hash_huge_page(struct mm_struct *mm, unsigned long access,
                   unsigned long ea, unsigned long vsid, int local,
                   unsigned long trap)
{
        pte_t *ptep;
        unsigned long old_pte, new_pte;
        unsigned long va, rflags, pa;
        long slot;
        int err = 1;

        ptep = huge_pte_offset(mm, ea);

        /* Search the Linux page table for a match with va */
        va = (vsid << 28) | (ea & 0x0fffffff);

        /*
         * If no pte found or not present, send the problem up to
         * do_page_fault
         */
        if (unlikely(!ptep || pte_none(*ptep)))
                goto out;

        /*
         * Check the user's access rights to the page.  If access should be
         * prevented then send the problem up to do_page_fault.
         */
        if (unlikely(access & ~pte_val(*ptep)))
                goto out;
        /*
         * At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */


        do {
                old_pte = pte_val(*ptep);
                if (old_pte & _PAGE_BUSY)
                        goto out;
                new_pte = old_pte | _PAGE_BUSY |
                        _PAGE_ACCESSED | _PAGE_HASHPTE;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));

        rflags = 0x2 | (!(new_pte & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no execute, so we
                 * don't need to worry about that case */
                rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
                                                       trap);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

                hash = hpt_hash(va, HPAGE_SHIFT);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;

                if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
                                         local) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }

        if (likely(!(old_pte & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
                unsigned long hpte_group;

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
                hpte_group = ((hash & htab_hash_mask) *
                              HPTES_PER_GROUP) & ~0x7UL;

                /* clear HPTE slot information in new PTE */
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

                /* Add in WIMG bits */
                /* XXX We should store these in the pte */
                /* --BenH: I think they are ... */
                rflags |= _PAGE_COHERENT;

                /* Insert into the hash table, primary slot */
                slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
                                          mmu_huge_psize);

                /* Primary is full, try the secondary */
                if (unlikely(slot == -1)) {
                        hpte_group = ((~hash & htab_hash_mask) *
                                      HPTES_PER_GROUP) & ~0x7UL;
                        slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
                                                  HPTE_V_SECONDARY,
                                                  mmu_huge_psize);
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
                                                      HPTES_PER_GROUP)&~0x7UL;

                                ppc_md.hpte_remove(hpte_group);
                                goto repeat;
                        }
                }

                if (unlikely(slot == -2))
                        panic("hash_huge_page: pte_insert failed\n");

                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }

        /*
         * No need to use ldarx/stdcx here
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);

        err = 0;

 out:
        return err;
}

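/*
 * Hugepte tables are handed out pre-zeroed (every entry pte_none())
 * by giving the slab cache a zeroing constructor.
 */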
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
        memset(addr, 0, kmem_cache_size(cache));
}

static int __init hugetlbpage_init(void)
{
        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -ENODEV;

        huge_pgtable_cache = kmem_cache_create("hugepte_cache",
                                               HUGEPTE_TABLE_SIZE,
                                               HUGEPTE_TABLE_SIZE,
                                               0,
                                               zero_ctor, NULL);
        if (! huge_pgtable_cache)
                panic("hugetlbpage_init(): could not create hugepte cache\n");

        return 0;
}

module_init(hugetlbpage_init);