/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

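/*
 * Walk the page tables for @addr, allocating the pmd and pte levels
 * as needed, and return the base-page PTE slot.  On sparc64 a huge
 * page is not a single huge pmd entry; it is represented by a run of
 * ordinary PTEs, so the walk bottoms out at the pte level.
 */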
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pud = pud_offset(pgd, addr);
		if (pud) {
			pmd = pmd_alloc(mm, pud, addr);
			if (pmd)
				pte = pte_alloc_map(mm, pmd, addr);
		}
	}
	return pte;
}

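/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the existing
 * page tables for @addr without allocating anything, returning NULL
 * if an intermediate level is missing.
 */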
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pud = pud_offset(pgd, addr);
		if (pud) {
			pmd = pmd_offset(pud, addr);
			if (pmd)
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

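/*
 * Marking a PTE with _PAGE_SZHUGE sets the page-size bits in the
 * hardware TTE, so the entry is loaded into the TLB as a huge
 * mapping rather than a base-page one.
 */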
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

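/*
 * Install one huge page: write a base-page PTE for each PAGE_SIZE
 * piece of it, stepping the physical address in the entry by
 * PAGE_SIZE each iteration.  With the usual sparc64 sizes (4 MB huge
 * pages, 8 KB base pages) that is 512 PTE slots per huge page.
 */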
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr,
			 struct page *page, pte_t *page_table, int write_access)
{
	unsigned long i;
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

	if (write_access)
		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
						       vma->vm_page_prot)));
	else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, page_table, entry);
		page_table++;
		addr += PAGE_SIZE;

		pte_val(entry) += PAGE_SIZE;
	}
}

/*
 * Check that both addr and len are aligned to the huge page size.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
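/*
 * For example, assuming the 4 MB HPAGE_SIZE used here, an addr/len
 * pair of 0x400000/0x800000 passes, while a len of 0x500000 is not
 * a huge page multiple and yields -EINVAL.
 */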
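/*
 * Duplicate all huge page mappings from @src into @dst at fork time.
 * Hugetlb areas are prefaulted on this architecture, so a missing
 * source PTE is a BUG; the page's refcount is raised once per huge
 * page that maps it.
 */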
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int i;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			set_pte_at(dst, addr, dst_pte, entry);
			pte_val(entry) += PAGE_SIZE;
			dst_pte++;
			addr += PAGE_SIZE;
		}
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
	}
	return 0;

nomem:
	return -ENOMEM;
}

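/*
 * get_user_pages() back end for hugetlb areas: fill @pages and @vmas
 * with one entry per base page of the range.  Because hugetlb
 * mappings are prefaulted and locked, every PTE is expected to be
 * present.
 */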
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	while (vaddr < vma->vm_end && remainder) {
		if (pages) {
			pte_t *pte;
			struct page *page;

			pte = huge_pte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			BUG_ON(!pte || pte_none(*pte));

			page = pte_page(*pte);

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}

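/*
 * sparc64 builds huge mappings out of base-page PTEs rather than
 * huge pmd entries, so the generic follow_page() hooks below have
 * nothing special to do here.
 */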
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

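/*
 * Tear down all mappings in [start, end): drop one reference per
 * huge page, clear each of its base-page PTEs, and flush the TLB
 * for the range.  Both boundaries must be huge page aligned.
 */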
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;
	int i;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		BUG_ON(!pte);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
			pte_clear(mm, address+(i*PAGE_SIZE), pte);
			pte++;
		}
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}

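/*
 * Cross-call handler: reload the secondary context register on any
 * CPU currently running this mm, so that new page-size field
 * settings take effect everywhere.
 */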
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

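/*
 * Prefault the whole hugetlb VMA: for each huge-page-sized slot,
 * charge quota, find or allocate the page in the page cache, and
 * install its PTEs.  On UltraSPARC-III+ the context register is
 * first updated so the second half of the D-TLB handles huge pages.
 */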
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}