/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

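/*
 * PTE invalidation helpers. ptep_flush_direct() always removes the entry
 * from the TLBs with an IPTE; if the local-TLB-clearing facility is
 * available and the mm is attached to no CPU besides the current one, the
 * cheaper local form of the instruction is used. ptep_flush_lazy() only
 * marks the PTE invalid and defers the flush via mm->context.flush_mm when
 * no other CPU has the mm attached. Both helpers bump the upper half word
 * of mm->context.attach_count around the operation; the lower half word
 * holds the number of CPUs the mm is currently attached to.
 */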
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        int active, count;
        pte_t old;

        old = *ptep;
        if (unlikely(pte_val(old) & _PAGE_INVALID))
                return old;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                __ptep_ipte_local(addr, ptep);
        else
                __ptep_ipte(addr, ptep);
        atomic_sub(0x10000, &mm->context.attach_count);
        return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep)
{
        int active, count;
        pte_t old;

        old = *ptep;
        if (unlikely(pte_val(old) & _PAGE_INVALID))
                return old;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if ((count & 0xffff) <= active) {
                pte_val(*ptep) |= _PAGE_INVALID;
                mm->context.flush_mm = 1;
        } else
                __ptep_ipte(addr, ptep);
        atomic_sub(0x10000, &mm->context.attach_count);
        return old;
}

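/*
 * The PGSTE (page status table entry) for a PTE lives in the second half
 * of the page table, PTRS_PER_PTE entries behind the PTE itself. The PCL
 * bit in the PGSTE acts as a lock: pgste_get_lock() sets it with a
 * compare-and-swap loop, pgste_set_unlock() clears it again when the
 * updated PGSTE value is stored back. pgste_get()/pgste_set() are the
 * unlocked accessors.
 */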
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
        unsigned long new = 0;
#ifdef CONFIG_PGSTE
        unsigned long old;

        asm(
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       nihh    %0,0xff7f\n"    /* clear PCL bit in old */
                "       oihh    %1,0x0080\n"    /* set PCL bit in new */
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
                : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
        return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        asm(
                "       nihh    %1,0xff7f\n"    /* clear PCL bit */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
                : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                : "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
        unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
        pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
        return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

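/*
 * pgste_update_all() folds the hardware storage key of the mapped page
 * into the PGSTE: the changed/referenced bits are merged into the guest
 * GC/GR bits and the access key plus fetch-protection bit are copied.
 * pgste_set_key() goes the other way and sets the real storage key from
 * the key information saved in the PGSTE when a PTE becomes valid.
 */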
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
                                       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address, bits, skey;

        if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
                return pgste;
        address = pte_val(pte) & PAGE_MASK;
        skey = (unsigned long) page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* GR bit & GC bit */
        /* Copy page access key and fetch protection bit to pgste */
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
        return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long nkey;

        if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
                return;
        VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
        address = pte_val(entry) & PAGE_MASK;
        /*
         * Set page access key and fetch protection bit from pgste.
         * The guest C/R information is still in the PGSTE, set real
         * key C/R to 0.
         */
        nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
        nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
        page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
        if ((pte_val(entry) & _PAGE_PRESENT) &&
            (pte_val(entry) & _PAGE_WRITE) &&
            !(pte_val(entry) & _PAGE_INVALID)) {
                if (!MACHINE_HAS_ESOP) {
                        /*
                         * Without enhanced suppression-on-protection force
                         * the dirty bit on for all writable ptes.
                         */
                        pte_val(entry) |= _PAGE_DIRTY;
                        pte_val(entry) &= ~_PAGE_PROTECT;
                }
                if (!(pte_val(entry) & _PAGE_PROTECT))
                        /* This pte allows write access, set user-dirty */
                        pgste_val(pgste) |= PGSTE_UC_BIT;
        }
#endif
        *ptep = entry;
        return pgste;
}

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
                                        unsigned long addr,
                                        pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        if (pgste_val(pgste) & PGSTE_IN_BIT) {
                pgste_val(pgste) &= ~PGSTE_IN_BIT;
                ptep_notify(mm, addr, ptep);
        }
#endif
        return pgste;
}

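/*
 * ptep_xchg_start()/ptep_xchg_commit() bracket a PTE exchange for mms
 * with PGSTEs: start takes the PGSTE lock and delivers a pending
 * invalidation notification, commit transfers storage key and
 * changed/referenced state between PTE and PGSTE, stores the new PTE and
 * drops the lock. Without PGSTEs the new PTE is simply stored.
 */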
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        pgste_t pgste = __pgste(0);

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
        }
        return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    pgste_t pgste, pte_t old, pte_t new)
{
        if (mm_has_pgste(mm)) {
                if (pte_val(old) & _PAGE_INVALID)
                        pgste_set_key(ptep, pgste, new, mm);
                if (pte_val(new) & _PAGE_INVALID) {
                        pgste = pgste_update_all(old, pgste, mm);
                        if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
                            _PGSTE_GPS_USAGE_UNUSED)
                                pte_val(old) |= _PAGE_UNUSED;
                }
                pgste = pgste_set_pte(ptep, pgste, new);
                pgste_set_unlock(ptep, pgste);
        } else {
                *ptep = new;
        }
}

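/*
 * ptep_xchg_direct() exchanges a PTE and flushes the TLBs immediately,
 * ptep_xchg_lazy() defers the flush when possible. A caller that wants to
 * atomically clear a PTE and inspect the old value could do something
 * like the following (hypothetical sketch, not taken from this file):
 *
 *	pte_t old = ptep_xchg_direct(mm, addr, ptep, __pte(_PAGE_INVALID));
 *	if (pte_dirty(old))
 *		set_page_dirty(pte_page(old));
 *
 * Both functions disable preemption around the start/flush/commit
 * sequence so the flush helpers see a stable view of the current CPU.
 */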
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pte_t *ptep, pte_t new)
{
        pgste_t pgste;
        pte_t old;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_direct(mm, addr, ptep);
        ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t new)
{
        pgste_t pgste;
        pte_t old;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
        ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

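/*
 * ptep_modify_prot_start()/_commit() implement the generic two-step
 * protection-change protocol: start lazily invalidates the PTE and, for
 * mms with PGSTEs, saves the updated PGSTE, commit stores the new PTE and
 * releases the PGSTE lock. Preemption is disabled in start and re-enabled
 * only in commit, so the two calls must always be paired.
 */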
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        pgste_t pgste;
        pte_t old;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(old, pgste, mm);
                pgste_set(ptep, pgste);
        }
        return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get(ptep);
                pgste_set_key(ptep, pgste, pte, mm);
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else {
                *ptep = pte;
        }
        preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

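/*
 * The pmd flush helpers mirror the PTE variants for (huge) segment table
 * entries: the direct form invalidates with IDTE, falling back to CSP on
 * machines without the IDTE facility, while the lazy form only marks the
 * entry invalid and defers the flush when no other CPU has the mm
 * attached.
 */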
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        int active, count;
        pmd_t old;

        old = *pmdp;
        if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                return old;
        if (!MACHINE_HAS_IDTE) {
                __pmdp_csp(pmdp);
                return old;
        }
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                __pmdp_idte_local(addr, pmdp);
        else
                __pmdp_idte(addr, pmdp);
        atomic_sub(0x10000, &mm->context.attach_count);
        return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        int active, count;
        pmd_t old;

        old = *pmdp;
        if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                return old;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if ((count & 0xffff) <= active) {
                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
        } else if (MACHINE_HAS_IDTE)
                __pmdp_idte(addr, pmdp);
        else
                __pmdp_csp(pmdp);
        atomic_sub(0x10000, &mm->context.attach_count);
        return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t new)
{
        pmd_t old;

        preempt_disable();
        old = pmdp_flush_direct(mm, addr, pmdp);
        *pmdp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
                     pmd_t *pmdp, pmd_t new)
{
        pmd_t old;

        preempt_disable();
        old = pmdp_flush_lazy(mm, addr, pmdp);
        *pmdp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

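/*
 * For transparent huge pages the page table that previously mapped the
 * PMD range is deposited here and linked off pmd_huge_pte(mm, pmdp) via a
 * list_head embedded in the (now unused) page table itself, so that it
 * can be withdrawn again when the huge mapping is split or removed.
 */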
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;
        pte_t *ptep;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        pte_val(*ptep) = _PAGE_INVALID;
        ptep++;
        pte_val(*ptep) = _PAGE_INVALID;
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

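/*
 * The remaining helpers are only needed for mms with PGSTEs, i.e. for
 * KVM guests: they keep the guest view of storage keys, dirty state and
 * invalidation notifications in sync with the host page tables.
 */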
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        pgste_t pgste;

        /* the mm_has_pgste() check is done in set_pte_at() */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
        pgste_set_key(ptep, pgste, entry, mm);
        pgste = pgste_set_pte(ptep, pgste, entry);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pgste_t pgste;

        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) |= PGSTE_IN_BIT;
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
        if (!non_swap_entry(entry))
                dec_mm_counter(mm, MM_SWAPENTS);
        else if (is_migration_entry(entry)) {
                struct page *page = migration_entry_to_page(entry);

                dec_mm_counter(mm, mm_counter(page));
        }
        free_swap_and_cache(entry);
}

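/*
 * ptep_zap_unused() discards swap entries of pages the guest has marked
 * as unused or logically zero in the PGSTE; with reset != 0 it only
 * clears the guest usage state. ptep_zap_key() resets the storage key
 * information in the PGSTE and, for a writable mapped page, in the real
 * storage key as well.
 */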
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, int reset)
{
        unsigned long pgstev;
        pgste_t pgste;
        pte_t pte;

        /* Zap unused and logically-zero pages */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        pte = *ptep;
        if (!reset && pte_swap(pte) &&
            ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
             (pgstev & _PGSTE_GPS_ZERO))) {
                ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
        if (reset)
                pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        unsigned long ptev;
        pgste_t pgste;

        /* Clear storage key */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
                              PGSTE_GR_BIT | PGSTE_GC_BIT);
        ptev = pte_val(*ptep);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;
        pte_t pte;
        bool dirty;

        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep))
                return false;

        pgste = pgste_get_lock(ptep);
        dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
        pgste_val(pgste) &= ~PGSTE_UC_BIT;
        pte = *ptep;
        if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
                pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
                __ptep_ipte(addr, ptep);
                if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
                        pte_val(pte) |= _PAGE_PROTECT;
                else
                        pte_val(pte) |= _PAGE_INVALID;
                *ptep = pte;
        }
        pgste_set_unlock(ptep, pgste);

        spin_unlock(ptl);
        return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

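/*
 * set_guest_storage_key() updates the guest storage key for one page: the
 * access key, fetch-protection, referenced and changed bits are stored in
 * the PGSTE and, if the page is currently mapped, the access key and
 * fetch-protection bit are also propagated to the real storage key while
 * the host referenced/changed state is merged back into the PGSTE. The nq
 * flag is passed (inverted) to page_set_storage_key() to select the
 * non-quiescing form of the key update.
 */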
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq)
{
        unsigned long keyul;
        spinlock_t *ptl;
        pgste_t old, new;
        pte_t *ptep;

        down_read(&mm->mmap_sem);
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }

        new = old = pgste_get_lock(ptep);
        pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
                            PGSTE_ACC_BITS | PGSTE_FP_BIT);
        keyul = (unsigned long) key;
        pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
        pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                unsigned long address, bits, skey;

                address = pte_val(*ptep) & PAGE_MASK;
                skey = (unsigned long) page_get_storage_key(address);
                bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
                skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
                /* Set storage key ACC and FP */
                page_set_storage_key(address, skey, !nq);
                /* Merge host changed & referenced into pgste */
                pgste_val(new) |= bits << 52;
        }
        /* changing the guest storage key is considered a change of the page */
        if ((pgste_val(new) ^ pgste_val(old)) &
            (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
                pgste_val(new) |= PGSTE_UC_BIT;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

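/*
 * get_guest_storage_key() returns the guest view of the storage key: for
 * an unmapped page it is reconstructed from the PGSTE, for a mapped page
 * the real storage key is read and the guest referenced/changed bits from
 * the PGSTE are merged in. Note that the -EFAULT returned when the PTE
 * cannot be located is truncated by the unsigned char return type.
 */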
unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
        unsigned char key;
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;

        down_read(&mm->mmap_sem);
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
        pgste = pgste_get_lock(ptep);

        if (pte_val(*ptep) & _PAGE_INVALID) {
                key  = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
                key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
                key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
                key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
        } else {
                key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);

                /* Reflect guest's logical view, not physical */
                if (pgste_val(pgste) & PGSTE_GR_BIT)
                        key |= _PAGE_REFERENCED;
                if (pgste_val(pgste) & PGSTE_GC_BIT)
                        key |= _PAGE_CHANGED;
        }

        pgste_set_unlock(ptep, pgste);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif