/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Array of valid huge page sizes - a non-zero value (the hugepte shift)
 * is stored for each huge page size that is valid.
 */
unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */

#define hugepte_shift			mmu_huge_psizes
#define PTRS_PER_HUGEPTE(psize)		(1 << hugepte_shift[psize])
#define HUGEPTE_TABLE_SIZE(psize)	(sizeof(pte_t) << hugepte_shift[psize])

#define HUGEPD_SHIFT(psize)		(mmu_psize_to_shift(psize) \
						+ hugepte_shift[psize])
#define HUGEPD_SIZE(psize)		(1UL << HUGEPD_SHIFT(psize))
#define HUGEPD_MASK(psize)		(~(HUGEPD_SIZE(psize)-1))
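
/* Illustration of the macro arithmetic above (the concrete values here
 * are examples, not constants from this file): with a 4K base page,
 * 16M huge pages sit at the PUD level, so set_huge_psize() below
 * computes hugepte_shift[MMU_PAGE_16M] = PUD_SHIFT - 24.  Assuming
 * PUD_SHIFT were 30, that gives a shift of 6, hence
 * PTRS_PER_HUGEPTE == 64, HUGEPTE_TABLE_SIZE == 64 * sizeof(pte_t)
 * == 512 bytes, and HUGEPD_SHIFT == 24 + 6 == 30 - i.e. one hugepte
 * table maps exactly the 1 << 30 bytes covered by the PUD entry that
 * points to it. */
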
/* Subtract one from the array size because we don't need a cache for 4K,
 * since it is not a huge page size */
#define HUGE_PGTABLE_INDEX(psize)	(HUGEPTE_CACHE_NUM + psize - 1)
#define HUGEPTE_CACHE_NAME(psize)	(huge_pgtable_cache_name[psize])

static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
	[MMU_PAGE_64K]	= "hugepte_cache_64K",
	[MMU_PAGE_1M]	= "hugepte_cache_1M",
	[MMU_PAGE_16M]	= "hugepte_cache_16M",
	[MMU_PAGE_16G]	= "hugepte_cache_16G",
};

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)
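
/* For illustration (this is the pattern that hugepd_page() below
 * undoes, and that __hugepte_alloc() creates): a hugepd entry is
 * simply the hugepte table's kernel address with bit 0 set as a tag,
 *
 *	hpdp->pd = (unsigned long)new_table | HUGEPD_OK;
 *
 * Page tables are always at least word-aligned, so bit 0 is otherwise
 * clear; that stray low bit is also what makes pmd_bad()/pud_bad()
 * reject these entries if they are ever misread as normal tables. */
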
static inline int shift_to_mmu_psize(unsigned int shift)
{
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case PAGE_SHIFT_64K:
		return MMU_PAGE_64K;
#endif
	case PAGE_SHIFT_16M:
		return MMU_PAGE_16M;
	case PAGE_SHIFT_16G:
		return MMU_PAGE_16G;
	}
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    struct hstate *hstate)
{
	unsigned int shift = huge_page_shift(hstate);
	int psize = shift_to_mmu_psize(shift);
	unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
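
/* Allocate and install a hugepte table in *hpdp.  The allocation is
 * done without the page-table lock held, so two threads can race to
 * populate the same slot; whoever loses the race under
 * mm->page_table_lock simply frees its freshly allocated table and
 * both end up using the winner's. */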
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int psize)
{
	pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
				       GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}


static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PUD_SHIFT)
		return pud_offset(pgd, addr);
	else
		return (pud_t *) pgd;
}

static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
			 struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PUD_SHIFT)
		return pud_alloc(mm, pgd, addr);
	else
		return (pud_t *) pgd;
}

static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PMD_SHIFT)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}

static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
			 struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PMD_SHIFT)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}

/* Build a list of addresses of gigantic pages.  This function is used
 * in early boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	       unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}
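
/* Sketch of how this pair is used at boot (the caller and the figures
 * are illustrative): the early device-tree scan queues each
 * pre-reserved gigantic page,
 *
 *	add_gpage(block_addr, 1UL << PAGE_SHIFT_16G, nr_16G_pages);
 *
 * and the generic hugetlb boot code later drains the queue one page
 * per call via alloc_bootmem_huge_page() below, stopping once it
 * returns 0. */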

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}


/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	unsigned int psize;
	unsigned int shift;
	unsigned long sz;
	struct hstate *hstate;
	psize = get_slice_psize(mm, addr);
	shift = mmu_psize_to_shift(psize);
	sz = ((1UL) << shift);
	hstate = size_to_hstate(sz);

	addr &= hstate->mask;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = hpud_offset(pg, addr, hstate);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr, hstate);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr,
						      hstate);
		}
	}

	return NULL;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	struct hstate *hstate;
	unsigned int psize;
	hstate = size_to_hstate(sz);

	psize = get_slice_psize(mm, addr);
	BUG_ON(!mmu_huge_psizes[psize]);

	addr &= hstate->mask;

	pg = pgd_offset(mm, addr);
	pu = hpud_alloc(mm, pg, addr, hstate);

	if (pu) {
		pm = hpmd_alloc(mm, pu, addr, hstate);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
		return NULL;

	return hugepte_offset(hpdp, addr, hstate);
}
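
/* Sketch of typical use from the generic hugetlb fault path (the call
 * site shown is illustrative): look the PTE up first, allocate only on
 * a miss,
 *
 *	ptep = huge_pte_alloc(mm, address, huge_page_size(hstate));
 *
 * The returned pointer indexes into a hugepte table installed at the
 * PMD, PUD or PGD level, depending on the huge page size. */
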
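/* PMD sharing of hugepage mappings is not implemented on powerpc;
 * returning 0 tells the generic code that nothing was unshared. */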
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
			       unsigned int psize)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
						 HUGEPTE_CACHE_NUM+psize-1,
						 PGF_CACHENUM_MASK));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling,
				   unsigned int psize)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;
	unsigned int shift;
	unsigned int psize = get_slice_psize(tlb->mm, addr);
	shift = mmu_psize_to_shift(psize);

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (shift < PMD_SHIFT) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling, psize);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud, psize);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * Comments below are taken from the normal free_pgd_range().
	 * They apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  No, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
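	/* A concrete illustration of the trimming below (all numbers
	 * invented for the example): suppose HUGEPD_SIZE == 1GB and we
	 * unmap [0x50000000, 0x58000000).  addr rounds down to
	 * 0x40000000; with floor == 0x40000000 that is not below
	 * floor, so the hugepte table covering that 1GB slot is a
	 * candidate for freeing.  Had floor been 0x48000000 instead,
	 * addr would be bumped to the next slot at 0x80000000, which
	 * is past end, so we'd return without freeing - the table is
	 * still in use by the live mapping below floor. */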
	unsigned int psize = get_slice_psize(tlb->mm, addr);

	addr &= HUGEPD_MASK(psize);
	if (addr < floor) {
		addr += HUGEPD_SIZE(psize);
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK(psize);
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE(psize);
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		psize = get_slice_psize(tlb->mm, addr);
		BUG_ON(!mmu_huge_psizes[psize]);
		next = pgd_addr_end(addr, end);
		if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			if (pgd_none(*pgd))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
		}
	} while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices.
		 */
		unsigned int psize = get_slice_psize(mm, addr);
		unsigned int shift = mmu_psize_to_shift(psize);
		unsigned long sz = ((1UL) << shift);
		struct hstate *hstate = size_to_hstate(sz);
		pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned int mmu_psize = get_slice_psize(mm, address);

	/* Verify it is a huge page, else bail. */
	if (!mmu_huge_psizes[mmu_psize])
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page) {
		unsigned int shift = mmu_psize_to_shift(mmu_psize);
		unsigned long sz = ((1UL) << shift);
		page += (address % sz) / PAGE_SIZE;
	}

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}


unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (!mmu_huge_psizes[mmu_psize])
		return -EINVAL;
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap,
						  unsigned long sz)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (sz / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}

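/* Resolve a hash-MMU fault on a hugepage mapping: look up the Linux
 * PTE, take the _PAGE_BUSY "lock", compute access permissions, and
 * insert or update the matching HPTE.  A non-zero return sends the
 * fault up to do_page_fault(), as the comments inside describe. */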
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa, sz;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);
	unsigned int mmu_psize;
	int shift;
	mmu_psize = get_slice_psize(mm, ea);

	if (!mmu_huge_psizes[mmu_psize])
		goto out;
	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE.  There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE.  The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */
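
	/* Atomically snapshot the PTE and set _PAGE_BUSY (plus
	 * _PAGE_ACCESSED).  _PAGE_BUSY acts as a per-PTE lock against
	 * concurrent hash faults: if it is already set we bail out and
	 * let the fault be retried; it is cleared again when the final
	 * PTE value is written back below. */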
	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	shift = mmu_psize_to_shift(mmu_psize);
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap, sz);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, shift, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in the new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

out:
	return err;
}

static void __init set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
	    mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
	     mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
	     mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
		/* Return if the huge page size has already been set up or
		 * is the same as the base page size. */
		if (mmu_huge_psizes[psize] ||
		    mmu_psize_defs[psize].shift == PAGE_SHIFT)
			return;
		if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL))
			return;
		hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);

		switch (mmu_psize_defs[psize].shift) {
		case PAGE_SHIFT_64K:
			/* We only allow 64k hpages with a 4k base page,
			 * which was checked above, and always put them
			 * at the PMD level */
			hugepte_shift[psize] = PMD_SHIFT;
			break;
		case PAGE_SHIFT_16M:
			/* 16M pages can be at two different levels
			 * of pagetables based on the base page size */
			if (PAGE_SHIFT == PAGE_SHIFT_64K)
				hugepte_shift[psize] = PMD_SHIFT;
			else /* 4k base page */
				hugepte_shift[psize] = PUD_SHIFT;
			break;
		case PAGE_SHIFT_16G:
			/* 16G pages are always at PGD level */
			hugepte_shift[psize] = PGDIR_SHIFT;
			break;
		}
		hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
	} else
		hugepte_shift[psize] = 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	mmu_psize = shift_to_mmu_psize(shift);
	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
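
/* For example, booting with "hugepagesz=16M" on the kernel command
 * line invokes hugepage_setup_sz("16M"): memparse() yields 0x1000000,
 * whose lowest set bit gives shift 24, i.e. MMU_PAGE_16M.  The option
 * may be given more than once to enable several sizes. */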

static int __init hugetlbpage_init(void)
{
	unsigned int psize;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	/* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
	 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
	 * sizes changes.
	 */
	set_huge_psize(MMU_PAGE_16M);
	set_huge_psize(MMU_PAGE_16G);

	/* Temporarily disable support for 64K huge pages when 64K SPU local
	 * store support is enabled as the current implementation conflicts.
	 */
#ifndef CONFIG_SPU_FS_64K_LS
	set_huge_psize(MMU_PAGE_64K);
#endif

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		if (mmu_huge_psizes[psize]) {
			pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
				kmem_cache_create(
						HUGEPTE_CACHE_NAME(psize),
						HUGEPTE_TABLE_SIZE(psize),
						HUGEPTE_TABLE_SIZE(psize),
						0,
						NULL);
			if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
				panic("hugetlbpage_init(): could not create %s\n",
				      HUGEPTE_CACHE_NAME(psize));
		}
	}

	return 0;
}

module_init(hugetlbpage_init);