/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

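/*
 * Invalidate a pte and flush the TLB entry synchronously. The mm's
 * attach_count doubles as a hint (pending flushers in the upper 16 bits,
 * attached CPUs in the lower 16): if no other CPU has the mm attached,
 * the cheaper CPU-local IPTE form can be used.
 */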
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	int active, count;
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(addr, ptep);
	else
		__ptep_ipte(addr, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}

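/*
 * Lazy variant: if the mm is not attached anywhere else, only mark the
 * pte invalid and defer the TLB flush to the next full-mm flush
 * (mm->context.flush_mm); otherwise fall back to a direct IPTE.
 */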
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	int active, count;
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}

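/*
 * The PGSTE (page status table entry) of a pte lives in the same page
 * table page, PTRS_PER_PTE entries further on. The PCL bit (0x0080 in
 * the high halfword) acts as a lock for the pte/pgste pair and is
 * taken with a compare-and-swap loop.
 */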
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

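/* Non-atomic PGSTE accessors, only valid while the PCL lock is held. */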
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

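/*
 * Fold the real storage key state of a valid page into the PGSTE:
 * guest change/reference state plus the access key and the
 * fetch-protection bit.
 */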
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

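/*
 * When a pte becomes valid, restore the access key and fetch-protection
 * bit that were saved in the PGSTE into the real storage key.
 */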
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

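/*
 * Install a new pte. Without enhanced suppression-on-protection (ESOP)
 * the dirty bit is forced on for all writable ptes; a pte that allows
 * write access also sets the user-dirty (UC) bit in the PGSTE so guest
 * dirtying is not lost.
 */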
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

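/*
 * If the invalidation-notification (IN) bit is set in the PGSTE, clear
 * it and notify the gmap/KVM layer that this pte is about to be
 * invalidated.
 */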
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		ptep_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

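/*
 * Bracket around a pte exchange: ptep_xchg_start() takes the PGSTE lock
 * and fires the invalidation notifier, ptep_xchg_commit() updates the
 * storage key state, installs the new pte and drops the lock.
 */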
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
}

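/*
 * Exported pte exchange, with immediate (direct) or deferred (lazy)
 * TLB flush. Preemption is disabled so the CPU-local optimization in
 * the flush helpers remains valid.
 */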
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

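/*
 * Start/commit pair for protection updates. _start invalidates the pte
 * lazily and parks the PGSTE, still locked, via pgste_set(); _commit
 * installs the modified pte and unlocks. Preemption stays disabled
 * between the two calls.
 */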
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

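/*
 * Segment table (pmd) flush helpers, mirroring the pte helpers above.
 * Machines without the IDTE facility have to fall back to CSP
 * (compare and swap and purge).
 */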
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	int active, count;
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(addr, pmdp);
	else
		__pmdp_idte(addr, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	int active, count;
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
	return old;
}

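/* Exchange a pmd, with either an immediate or a lazy TLB flush. */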
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
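/*
 * Deposit/withdraw a preallocated page table for a huge pmd; the
 * deposited tables are chained into a list through the page table
 * memory itself.
 */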
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
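/*
 * KVM guest support: pte operations that keep the PGSTE in sync when
 * ptes are set, invalidated or zapped.
 */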
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

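/* Arm the invalidation notifier for a pte, see pgste_ipte_notify(). */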
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

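/*
 * Zap a pte that the guest has marked unused or logically zero,
 * releasing a swap entry if one is held; with "reset" the guest usage
 * state in the PGSTE is cleared as well.
 */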
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

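/*
 * Reset the storage key of a page to PAGE_DEFAULT_KEY and clear the
 * key state cached in the PGSTE.
 */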
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and clear the user-dirty state of a guest page.
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

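/*
 * Set the guest view of the storage key for a page. The guest key is
 * kept in the PGSTE; for a mapped page the real storage key is updated
 * as well ("nq" selects the non-quiescing form of the key-setting
 * instruction).
 */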
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

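/*
 * Read the guest view of the storage key: from the PGSTE alone for an
 * invalid pte, otherwise from the real storage key merged with the
 * guest reference/change bits kept in the PGSTE.
 */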
unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
	unsigned char key;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	pgste = pgste_get_lock(ptep);

	if (pte_val(*ptep) & _PAGE_INVALID) {
		key  = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
	} else {
		key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);

		/* Reflect guest's logical view, not physical */
		if (pgste_val(pgste) & PGSTE_GR_BIT)
			key |= _PAGE_REFERENCED;
		if (pgste_val(pgste) & PGSTE_GC_BIT)
			key |= _PAGE_CHANGED;
	}

	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif