/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

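/* The sparc64 virtual address hole runs from 0x0000080000000000 up to
 * 0xfffff80000000000; these constants widen that range by a 4GB guard
 * on either side.  The unmapped-area searches below refuse to place a
 * huge mapping in any window that would touch this excluded range.
 */
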
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len >= VA_EXCLUDE_START))
                return -ENOMEM;

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

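        /* Walk the VMA list upward looking for a hole large enough for
         * the request.  find_vma() guarantees that on each iteration
         * either vma is NULL or addr < vma->vm_end.  A candidate window
         * that would straddle the VA hole is pushed up to VA_EXCLUDE_END,
         * and a scan that started from the cached address and runs off
         * the end of the usable address space is restarted once from
         * TASK_UNMAPPED_BASE before failing with -ENOMEM.
         */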
full_search:
        addr = ALIGN(addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

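/* Top-down variant, used only for 32-bit tasks.  The search starts just
 * below the cached hint (initially mm->mmap_base) and walks downward in
 * HPAGE_SIZE-aligned steps, falling back to the bottom-up allocator if
 * no hole is found below the mmap base.
 */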
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache & HPAGE_MASK;

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = (mm->mmap_base-len) & HPAGE_MASK;

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start-len) & HPAGE_MASK;
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

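/* Common entry point: validate the length and any hinted address, then
 * hand off to the bottom-up or top-down search depending on which mmap
 * layout this mm is using.
 */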
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

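/* There is no hardware huge PMD here: a huge page is represented in the
 * page tables by a run of (1 << HUGETLB_PAGE_ORDER) contiguous normal
 * PTEs, each carrying _PAGE_SZHUGE.  (With the usual 8K base page and
 * 4M HPAGE_SIZE that is 512 entries per huge page.)  huge_pte_alloc()
 * therefore just walks, and if necessary allocates, the page table
 * levels down to the first base PTE of the huge page.
 */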
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pud = pud_offset(pgd, addr);
                if (pud) {
                        pmd = pmd_alloc(mm, pud, addr);
                        if (pmd)
                                pte = pte_alloc_map(mm, pmd, addr);
                }
        }
        return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

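/* Setting and clearing a huge mapping has to touch every base PTE that
 * backs it.  set_huge_pte_at() steps both the virtual address and the
 * physical address encoded in the PTE by PAGE_SIZE on each iteration,
 * so consecutive base pages map consecutive physical pages of the huge
 * page; huge_ptep_get_and_clear() undoes the whole run and hands back
 * the first entry.
 */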
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

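/* Huge mappings are never marked at the pmd level here, so the generic
 * follow_page()/get_user_pages() code resolves them through the normal
 * PTE walk; these hooks can therefore remain stubs.
 */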
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

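/* Cross-call handler: any CPU currently running this address space
 * reloads its secondary context register so that the new page size
 * fields written in hugetlb_prefault_arch_hook() take effect.
 */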
static void context_reload(void *__data)
{
        struct mm_struct *mm = __data;

        if (mm == current->mm)
                load_secondary_context(mm);
}

void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
        /* On UltraSPARC-III+ and later, configure the second half of
         * the Data-TLB for huge pages.
         */
        if (tlb_type == cheetah_plus) {
                unsigned long ctx;

                spin_lock(&ctx_alloc_lock);
                ctx = mm->context.sparc64_ctx_val;
                ctx &= ~CTX_PGSZ_MASK;
                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
                ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

                if (ctx != mm->context.sparc64_ctx_val) {
                        /* When changing the page size fields, we
                         * must perform a context flush so that no
                         * stale entries match.  This flush must
                         * occur with the original context register
                         * settings.
                         */
                        do_flush_tlb_mm(mm);

                        /* Reload the context register of all processors
                         * also executing in this address space.
                         */
                        mm->context.sparc64_ctx_val = ctx;
                        on_each_cpu(context_reload, mm, 0, 0);
                }
                spin_unlock(&ctx_alloc_lock);
        }
}