/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

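/*
 * Flush helpers for a single pte. The "direct" variant always removes
 * the TLB entry with an IPTE instruction; if only the local CPU has the
 * mm attached and the machine supports TLB local clearing, a local IPTE
 * is sufficient, otherwise the flush is broadcast to all CPUs. The
 * "lazy" variant merely marks the pte invalid and defers the flush via
 * the flush_mm flag if no other CPU is using the mm.
 */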
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

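/*
 * A page table page with PGSTEs has a second half that holds the page
 * status table entries. The PCL bit of a PGSTE acts as a lock that
 * serializes updates of the pte/pgste pair: pgste_get_lock() takes it
 * with a compare-and-swap loop, pgste_set_unlock() releases it.
 */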
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

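/*
 * Storage key handling: pgste_update_all() harvests the hardware
 * referenced/changed bits and the access key of a valid page into the
 * PGSTE before the pte is invalidated, pgste_set_key() moves the access
 * key and fetch protection bit back into the storage key when a pte
 * becomes valid again. Both are no-ops unless the mm uses storage keys.
 */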
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

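/*
 * Protocol for exchanging a pte: ptep_xchg_start() takes the PGSTE lock
 * and fires any pending invalidation notification, the caller then
 * flushes the old pte, and ptep_xchg_commit() transfers storage key and
 * usage state before installing the new pte and dropping the lock.
 */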
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

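/*
 * Replace a pte and return its previous value. The "direct" variant
 * flushes the TLB entry immediately, the "lazy" variant may defer the
 * flush if the mm is only attached to the local CPU. Preemption is
 * disabled so that the flush decision and the pte update happen on
 * the same CPU.
 */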
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

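/*
 * ptep_modify_prot_start/commit bracket a protection change: start
 * invalidates the pte (lazily) and, for mms with PGSTEs, leaves the
 * PGSTE locked; commit installs the modified pte and releases the
 * lock. Preemption stays disabled from start to commit.
 */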
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

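/*
 * pmd flush helpers, analogous to the pte versions above. Machines
 * without the IDTE facility fall back to CSP, which replaces the
 * entry and purges the TLB.
 */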
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte(addr, pmdp, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * reuse __pmdp_csp() here.
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte(addr, pudp, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
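/*
 * A page table deposited for a huge pmd is queued on a list headed by
 * pmd_huge_pte(); the list_head is stored in the first two (unused)
 * pte slots of the deposited page table. Withdraw restores those two
 * slots to _PAGE_INVALID before handing the page table back.
 */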
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

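/**
 * ptep_shadow_pte - create a shadow copy of a parent pte for vsie
 * @mm: pointer to the process mm_struct
 * @saddr: virtual address in the parent guest address space
 * @sptep: pointer to the source (parent) page table entry
 * @tptep: pointer to the target (shadow) page table entry
 * @pte: pte carrying the access protection for the shadow pte
 *
 * Returns 1 if the shadow pte was created, 0 if the target pte was
 * already valid, and -EAGAIN if the source pte is invalid or is
 * write-protected while write access is requested. The VSIE bit is
 * set in the source pgste so the shadow can be removed when the
 * source pte changes.
 */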
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

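/**
 * ptep_zap_unused - discard an unused or logically-zero swap pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @reset: reset the guest usage state instead of zapping
 *
 * Without @reset, a swap pte whose pgste marks the page as unused or
 * logically zero is dropped: the swap slot is released and the pte is
 * cleared. With @reset, only the usage state in the pgste is cleared.
 */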
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and clear the dirty state of a guest page. Returns true if the
 * page was dirty: the user-dirty (UC) bit in the pgste is cleared and
 * the pte is write-protected again so that further writes are caught.
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return false;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/*
	 * We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

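/**
 * set_guest_storage_key - set the guest storage key for a page
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @key: the new storage key (access key, FP, R and C bits)
 * @nq: if set, perform the key update without quiescing
 *
 * The key is stored in the pgste; if the page is mapped, the hardware
 * storage key is updated as well. Returns 0 on success and -EFAULT if
 * no page table entry can be located for @addr.
 */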
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/**
 * cond_set_guest_storage_key - conditionally set a guest storage key
 *				(handling csske)
 *
 * @oldkey is updated when either @mr or @mc is set and a pointer is given.
 *
 * Returns 0 if a guest storage key update wasn't necessary, 1 if the
 * guest storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * reset_guest_reference_bit - reset a guest reference bit (handling rrbe),
 *			       returning the reference and changed bit
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

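/**
 * get_guest_storage_key - read the guest storage key for a page
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @key: where to store the key
 *
 * The key is taken from the pgste, or from the hardware storage key if
 * the page is mapped; the guest's logical view of the referenced and
 * changed bits is merged in from the pgste. Returns 0 on success and
 * -EFAULT if no page table entry can be located for @addr.
 */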
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);

/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);

/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
		   unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);

/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif