/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This gives a constant
 * relationship between the address of a struct page and its PFN.
 *
 * During boot, or when a new memory section is added by memory hotplug,
 * physical memory allocation (including hash table bolting) is performed
 * only for the set of struct pages which are part of that memory section.
 * This saves memory by not allocating struct pages for PFNs which are not
 * valid.
 *
 * ----------------------------------------------
 * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 * ----------------------------------------------
 *
 *         f000000000000000                   c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+         |        +--------------+
 *  |      |  page struct |         |   +--> |  page struct |
 *  |      +--------------+         |   |    +--------------+
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct |         |   |
 *  |      +--------------+         |   |
 *  |      |  page struct | +-------+   |
 *  |      +--------------+             |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 * -----------------------------------------
 * | RELATION BETWEEN STRUCT PAGES AND PFNS|
 * -----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
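/*
 * Illustration only (not part of this file's logic): with a virtually
 * contiguous mem_map laid out like this, the generic SPARSEMEM_VMEMMAP
 * helpers reduce pfn<->page conversion to simple pointer arithmetic,
 * roughly:
 *
 *      pfn_to_page(pfn)  ~ vmemmap + (pfn)
 *      page_to_pfn(page) ~ (page) - vmemmap
 *
 * which is the "constant relationship" described above.
 */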
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                           unsigned long page_size,
                                           unsigned long phys)
{
        int rc = htab_bolt_mapping(start, start + page_size, phys,
                                   pgprot_val(PAGE_KERNEL),
                                   mmu_vmemmap_psize, mmu_kernel_ssize);
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
                                  unsigned long page_size)
{
        int rc = htab_remove_mapping(start, start + page_size,
                                     mmu_vmemmap_psize,
                                     mmu_kernel_ssize);
        BUG_ON((rc < 0) && (rc != -ENOENT));
        WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page() is currently only called by __ioremap(): it adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
        } else {
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * linux page table entry for this mapping. Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to create bolted mapping for "
                               "I/O memory at %016lx!\n", pa);
                        return -ENOMEM;
                }
        }

        smp_wmb();
        return 0;
}
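
/*
 * Illustration only, not part of this file: a rough sketch (assumed, not the
 * exact upstream code) of how an ioremap-style caller would use the helper
 * above -- walk the region one PAGE_SIZE step at a time and map each page,
 * giving up on the first failure:
 *
 *      for (i = 0; i < size; i += PAGE_SIZE)
 *              if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
 *                      return NULL;
 */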

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                        pmd_t *pmdp, unsigned long clr,
                                        unsigned long set)
{
        __be64 old_be, tmp;
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
#endif

        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
                and.    %1,%0,%6\n\
                bne-    1b \n\
                andc    %1,%0,%4 \n\
                or      %1,%1,%7\n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
        : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
          "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
        : "cc" );

        old = be64_to_cpu(old_be);

        trace_hugepage_update(addr, old, clr, set);
        if (old & H_PAGE_HASHPTE)
                hpte_do_hugepage_flush(mm, addr, pmdp, old);
        return old;
}

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        VM_BUG_ON(pmd_devmap(*pmdp));

        pmd = *pmdp;
        pmd_clear(pmdp);
        /*
         * Wait for all pending hash_page to finish. This is needed
         * in case of subpage collapse. When we collapse normal pages
         * to a hugepage, we first clear the pmd, then invalidate all
         * the PTE entries. The assumption here is that any low level
         * page fault will see a none pmd and take the slow path that
         * will wait on mmap_sem. But we could very well be in a
         * hash_page with a local ptep pointer value. Such a hash_page
         * can result in adding new HPTE entries for normal subpages.
         * That means we could be modifying the page content as we
         * copy it to a huge page. So wait for parallel hash_page
         * to finish before invalidating the HPTE entries. We can do
         * this by sending an IPI to all the cpus and executing a dummy
         * function there.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        /*
         * Now invalidate the hpte entries in the range
         * covered by pmd. This makes sure we take a
         * fault and will find the pmd as none, which will
         * result in a major fault which takes mmap_sem and
         * hence waits for the collapse to complete. Without this,
         * __collapse_huge_page_copy can result in copying
         * the old content.
         */
        flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
        return pmd;
}

/*
 * We want to put the pgtable in the pmd and use the pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                      pgtable_t pgtable)
{
        pgtable_t *pgtable_slot;

        assert_spin_locked(&mm->page_table_lock);
        /*
         * We store the pgtable in the second half of the PMD.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        *pgtable_slot = pgtable;
        /*
         * Expose the deposited pgtable to other cpus before we set
         * the hugepage PTE at the pmd level. The hash fault code looks
         * at the deposited pgtable to store hash index values.
         */
        smp_wmb();
}
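
/*
 * Usage note (illustrative, not code used here): the generic THP code
 * deposits a preallocated page table via pgtable_trans_huge_deposit() when
 * it installs a huge PMD, and withdraws it again when the PMD is split or
 * zapped. On hash MMUs the deposited fragment does double duty: besides
 * being reused as a page table on split, it records hash index values for
 * the base pages backing the huge page, which is why the withdraw path
 * below zeroes it out.
 */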

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;
        pgtable_t *pgtable_slot;

        assert_spin_locked(&mm->page_table_lock);
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Once we withdraw, mark the entry NULL.
         */
        *pgtable_slot = NULL;
        /*
         * We store HPTE information in the deposited PTE fragment.
         * Zero out the content on withdraw.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return pgtable;
}

void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
                                   unsigned long address, pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
        VM_BUG_ON(pmd_devmap(*pmdp));

        /*
         * We can't mark the pmd none here, because that will cause a race
         * against exit_mmap. We need to continue marking the pmd TRANS HUGE
         * while we split, but at the same time we want the rest of the ppc64
         * code not to insert a hash pte on this, because we will be modifying
         * the deposited pgtable in the caller of this function. Hence set
         * _PAGE_PRIVILEGED so that the fault handling moves to a higher
         * level function, which serializes against the ptl. We need to
         * flush the existing hash pte entries here even though the
         * translation is still valid, because we will withdraw the
         * pgtable_t after this.
         */
        pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                            pmd_t *pmdp, unsigned long old_pmd)
{
        int ssize;
        unsigned int psize;
        unsigned long vsid;
        unsigned long flags = 0;

        /* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
        psize = get_slice_psize(mm, addr);
        BUG_ON(psize == MMU_PAGE_16M);
#endif
        if (old_pmd & H_PAGE_COMBO)
                psize = MMU_PAGE_4K;
        else
                psize = MMU_PAGE_64K;

        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }

        if (mm_is_thread_local(mm))
                flags |= HPTE_LOCAL_UPDATE;

        return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        pgtable_t pgtable;
        unsigned long old;
        pgtable_t *pgtable_slot;

        old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * We have pmd == none and we are holding page_table_lock.
         * So we can safely go and clear the pgtable hash
         * index info.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Zero out the old valid and hash index details that the
         * hash fault code looks at.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        /*
         * Serialize against the find_current_mm_pte variants, which do
         * lockless lookups in the page tables with local interrupts
         * disabled. For huge pages they cast pmd_t to pte_t. Since the
         * format of pte_t is different from pmd_t, we want to prevent a
         * transit from a pmd pointing to a page table to a pmd pointing
         * to a huge page (and back) while interrupts are disabled.
         * We clear the pmd to possibly replace it with a page table
         * pointer in different code paths. So make sure we wait for the
         * parallel find_current_mm_pte to finish.
         */
        serialize_against_pte_lookup(mm);
        return old_pmd;
}

int hash__has_transparent_hugepage(void)
{
        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        /*
         * We support THP only if PMD_SIZE is 16MB.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
                return 0;
        /*
         * We need to make sure that we support 16MB hugepages in a segment
         * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
         * of 64K.
         */
        /*
         * If we have 64K HPTE, we will be using that by default
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
                return 0;
        /*
         * Ok we only have 4K HPTE
         */
        if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
                return 0;

        return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hash__change_memory_range(unsigned long start, unsigned long end,
                                      unsigned long newpp)
{
        unsigned long idx;
        unsigned int step, shift;

        shift = mmu_psize_defs[mmu_linear_psize].shift;
        step = 1 << shift;

        start = ALIGN_DOWN(start, step);
        end = ALIGN(end, step); // aligns up

        if (start >= end)
                return false;

        pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
                 start, end, newpp, step);

        for (idx = start; idx < end; idx += step)
                /* Not sure if we can do much with the return value */
                mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
                                                 mmu_kernel_ssize);

        return true;
}

void hash__mark_rodata_ro(void)
{
        unsigned long start, end;

        start = (unsigned long)_stext;
        end = (unsigned long)__init_begin;

        WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
        unsigned long start, end, pp;

        start = (unsigned long)__init_begin;
        end = (unsigned long)__init_end;

        pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

        WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif