/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

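/*
 * Invalidate a pte and flush the TLB entry right away. If the mm is
 * attached to the local CPU only and the machine has local TLB
 * clearing, the CPU-local IPTE variant is used, otherwise the
 * invalidation is broadcast to all CPUs.
 */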
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(addr, ptep);
	else
		__ptep_ipte(addr, ptep);
	atomic_dec(&mm->context.flush_count);
	return old;
}

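/*
 * Invalidate a pte but defer the TLB flush where possible: if only the
 * current CPU has the mm attached, the pte is merely marked invalid
 * and flush_mm is set so a later mm-wide flush cleans up; otherwise
 * the entry is flushed with IPTE immediately.
 */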
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep);
	atomic_dec(&mm->context.flush_count);
	return old;
}

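/*
 * The PGSTE (page status table entry) of a pte is located PTRS_PER_PTE
 * entries behind the pte itself. pgste_get_lock() acquires its PCL
 * lock bit with a compare-and-swap loop and returns the locked value,
 * pgste_set_unlock() stores a new value with the PCL bit cleared.
 */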
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

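/*
 * Plain load/store of the PGSTE value, without taking the PCL lock.
 */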
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

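/*
 * Transfer the referenced/changed state and the access key of the
 * backing page from the real storage key into the guest bits of the
 * PGSTE. Only done if the mm uses storage keys and the pte is valid.
 */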
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

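/*
 * Install a new pte value and keep the user-dirty tracking in the
 * PGSTE up to date: for a valid, writable and unprotected pte the UC
 * bit is set so that modifications through this mapping can later be
 * reported by test_and_clear_guest_dirty().
 */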
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

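/*
 * If the invalidation-notification bit is set in the PGSTE, clear it
 * and call ptep_notify() so that the gmap code is told about the
 * invalidation of this pte.
 */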
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		ptep_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

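/*
 * ptep_xchg_start() and ptep_xchg_commit() bracket a pte exchange:
 * start locks the PGSTE and delivers a pending invalidation
 * notification, commit propagates storage key and usage state between
 * pte and PGSTE, installs the new pte and drops the PGSTE lock. For
 * an mm without PGSTEs the new pte is simply stored.
 */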
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
}

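/*
 * Exchange a pte and return the old value, either with an immediate
 * TLB flush (direct) or with the flush deferred where possible (lazy).
 * Preemption is disabled so the CPU cannot change while the cpumask
 * based flush decision is made.
 */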
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

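/*
 * Start/commit a protection change of a pte. The start function
 * invalidates the pte lazily and, for an mm with PGSTEs, stashes the
 * still locked PGSTE value; the commit function restores key and
 * dirty information, installs the new pte and re-enables preemption.
 */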
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

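/*
 * The pmd flush helpers mirror the pte variants for segment table
 * entries: IDTE is used where available (local or broadcast), CSP is
 * the fallback on machines without IDTE. The lazy variant only marks
 * the pmd invalid if the mm is attached to this CPU alone.
 */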
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(addr, pmdp);
	else
		__pmdp_idte(addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

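/*
 * Region-third-table (pud) flush helper, same structure as
 * pmdp_flush_direct: IDTE if available, otherwise CSP via the pmd
 * helper, which works because the invalid bit sits at the same
 * position in pmd and pud entries.
 */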
static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte_local(addr, pudp);
	else
		__pudp_idte(addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

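/*
 * Page table deposit/withdraw for transparent huge pages: preallocated
 * page tables are kept on a list anchored at pmd_huge_pte() and handed
 * back in FIFO order when a huge pmd is split. The caller must hold
 * the pmd lock, hence the assert_spin_locked() checks.
 */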
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

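/*
 * The functions below are built for CONFIG_PGSTE only and implement
 * the PGSTE based storage key handling and guest dirty tracking used
 * by KVM.
 */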
#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

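/*
 * If the pte is a swap entry and the guest usage state in the PGSTE
 * marks the page as unused or logically zero, release the swap slot
 * and clear the pte. With reset != 0 the zap is skipped and only the
 * usage state in the PGSTE is reset.
 */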
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

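/*
 * Clear the key related guest bits in the PGSTE and, for a resident
 * writable page, reset the real storage key to the default key.
 */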
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

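/*
 * Set the guest storage key for a page: access bits, fetch protection
 * bit and guest referenced/changed state are stored in the PGSTE and,
 * for a resident page, the real storage key is updated as well. A key
 * change counts as a modification of the page, so the user-dirty bit
 * is set.
 */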
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

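/*
 * Read the guest storage key for a page. For an invalid pte the key is
 * reconstructed from the PGSTE, for a resident page the real storage
 * key is read and merged with the guest referenced/changed bits so
 * that the guest's logical view is returned.
 */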
unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
	unsigned char key;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	pgste = pgste_get_lock(ptep);

	if (pte_val(*ptep) & _PAGE_INVALID) {
		key = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
	} else {
		key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);

		/* Reflect guest's logical view, not physical */
		if (pgste_val(pgste) & PGSTE_GR_BIT)
			key |= _PAGE_REFERENCED;
		if (pgste_val(pgste) & PGSTE_GC_BIT)
			key |= _PAGE_CHANGED;
	}

	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif