/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

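/* The user address space is carved into hugepage "areas", tracked as
 * 16-bit masks in the mm context.  Below 4GB there is one area per MMU
 * segment: assuming SID_SHIFT == 28 (256MB segments), NUM_LOW_AREAS
 * works out to 0x100000000UL >> 28 == 16, exactly filling a u16.  The
 * BUILD_BUG_ONs in the flush and open routines below enforce that both
 * masks really are NUM_*_AREAS bits wide.
 */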
#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, addr);
#ifdef CONFIG_PPC_64K_PAGES
			/* Currently, we use the normal PTE offset within full
			 * size PTE pages, thus our huge PTEs are scattered in
			 * the PTE page and we do waste some.  We may change
			 * that in the future, but the current mechanism keeps
			 * things much simpler.
			 */
			if (!pmd_none(*pm)) {
				/* Note: pte_offset_* are all equivalent on
				 * ppc64 as we don't have HIGHMEM
				 */
				pt = pte_offset_kernel(pm, addr);
				return pt;
			}
#else /* CONFIG_PPC_64K_PAGES */
			/* On 4k pages, we put huge PTEs in the PMD page */
			pt = (pte_t *)pm;
			return pt;
#endif /* CONFIG_PPC_64K_PAGES */
		}
	}

	return NULL;
}

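/* huge_pte_alloc() mirrors the walk above, but allocates any missing
 * pud/pmd (and, on 64K pages, pte) levels on the way down instead of
 * bailing out; the generic hugetlb fault path is expected to call it
 * before installing a mapping with set_huge_pte_at().
 */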
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = pmd_alloc(mm, pu, addr);
		if (pm) {
#ifdef CONFIG_PPC_64K_PAGES
			/* See comment in huge_pte_offset.  Note that if we
			 * ever want to put the page size in the PMD, we would
			 * have to open code our own pte_alloc* function in
			 * order to populate and set the size atomically
			 */
			pt = pte_alloc_map(mm, pm, addr);
#else /* CONFIG_PPC_64K_PAGES */
			pt = (pte_t *)pm;
#endif /* CONFIG_PPC_64K_PAGES */
			return pt;
		}
	}

	return NULL;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_update (huge / !huge)
		 */
		unsigned long old = pte_update(ptep, ~0UL);
		if (old & _PAGE_HASHPTE)
			hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

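/* As in set_huge_pte_at() above, pte_update() returns the previous PTE
 * value; if it had _PAGE_HASHPTE set there may be a stale entry in the
 * hash table, and the final '1' passed to hpte_update() flags this as a
 * huge-page invalidation so the right hash entry gets flushed.
 */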
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
	*ptep = __pte(0);

	return __pte(old);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (!(within_hugepage_low_range(addr, len)
	      || within_hugepage_high_range(addr, len)))
		return -EINVAL;
	return 0;
}

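/* With 16MB huge pages (HPAGE_SHIFT == 24, the POWER4 size implied by
 * the CPU_FTR_16M_PAGE check further down), this accepts e.g. addr
 * 0x11000000 with len 0x2000000, but rejects addr 0x11000001 or len
 * 0x1800000, since either would leave low bits set under ~HPAGE_MASK.
 */
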
static void flush_low_segments(void *parm)
{
	u16 areas = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);

	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (!(areas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}

	asm volatile("isync" : : : "memory");
}

static void flush_high_segments(void *parm)
{
	u16 areas = (unsigned long) parm;
	unsigned long i, j;

	asm volatile("isync" : : : "memory");

	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);

	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (!(areas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     : : "r" (((i << HTLB_AREA_SHIFT)
					       + (j << SID_SHIFT)) | SLBIE_C));
	}

	asm volatile("isync" : : : "memory");
}

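/* Both flush routines run on every CPU via on_each_cpu(): each slbie
 * drops the SLB entry for one 256MB segment, and a high area covers
 * 1 << (HTLB_AREA_SHIFT - SID_SHIFT) segments, hence the inner loop.
 * SLBIE_C is assumed here to be the "class" bit, so only SLB entries
 * installed with the hugepage class are targeted.
 */
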
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (!newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);

	return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (!newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);

	return 0;
}

int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	int err;

	if ((addr + len) < addr)
		return -EINVAL;

	if ((addr + len) < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					   LOW_ESID_MASK(addr, len));
	else
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}

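/* LOW_ESID_MASK(addr, len) is taken to be the u16 bitmask of 256MB
 * segments the range [addr, addr+len) overlaps, e.g. addr 0x30000000
 * with len 0x20000000 touching segments 3 and 4 would give 0x0018;
 * HTLB_AREA_MASK plays the same role for the areas above 4GB.
 */
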
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (!in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

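/* pmd_huge() is always false here because ppc64 does not mark huge
 * mappings in the page table entries themselves; a hugepage is
 * recognised by its address falling in a hugepage area, so lookups are
 * served entirely by follow_huge_addr() above and the PMD-based path
 * can BUG().
 */
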
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

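/* The ALIGN() calls above implement the "evasion": assuming ALIGN rounds
 * up to the given power-of-two boundary, a candidate at, say, 0x32000000
 * that touches an open low hugepage area is bumped to ALIGN(0x32000001,
 * 1 << 28) == 0x40000000, i.e. the start of the next 256MB segment.
 */
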
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

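/* The htlb_get_*_area() helpers below are the hugepage-side counterpart
 * of the searches above: they scan bottom-up for a free range, but only
 * accept addresses whose segments/areas are included in the given mask,
 * skipping ahead a whole segment or area at a time otherwise.
 */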
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (!__within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (!__within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can do the mapping in the existing
		 * low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     !lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can do the mapping in the existing
		 * high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     !lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}

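/* The areamask loop above widens the search one step at a time: it
 * starts from the mask for the highest placement that could fit (e.g.
 * LOW_ESID_MASK(0x100000000UL - len, len), the topmost segments below
 * 4GB) and shifts right towards lower areas, stopping once bit 0 has
 * been tried, opening each candidate area only if a fit was found.
 */
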
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE.  There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE.  The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));

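	/* The loop above takes the PTE's _PAGE_BUSY bit with
	 * __cmpxchg_u64(): if another CPU is already hashing this PTE we
	 * simply bail out and let the fault be resolved or retried.  The
	 * busy bit is dropped again when the final PTE is written back
	 * at the end of this function.
	 */
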
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);

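	/* rflags builds the HPTE protection bits from the Linux PTE: the
	 * 0x2 base plus the inverted _PAGE_RW bit is assumed to select
	 * the user read/write vs read-only pp encodings, and HPTE_R_N
	 * sets no-execute whenever the PTE lacks _PAGE_EXEC (the sense
	 * is inverted, per the comment above).
	 */
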
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

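	/* When a new HPTE is created below, its location is remembered in
	 * the PTE itself: _PAGE_F_SECOND records which hash bucket
	 * (primary or secondary) was used, and _PAGE_F_GIX the slot
	 * within the group -- the same fields the update path above
	 * decodes via "(old_pte & _PAGE_F_GIX) >> 12".
	 */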
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			new_pte |= _PAGE_F_SECOND;
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & _PAGE_F_GIX;
	}

	/*
	 * No need to use ldarx/stdcx here because all who
	 * might be updating the pte will hold the
	 * page_table_lock
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}