/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <asm/book3s/64/mmu-hash.h>

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
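
/*
 * Editor's note (not in the original source): the order is log2 of the
 * HPT size in bytes, so the default order of 24 gives a 2^24 = 16MB
 * table.  At 16 (2^4) bytes per HPTE that is 2^20 entries, i.e. 2^17
 * HPTE groups of 8.
 */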
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all data that we apply to the PTE, we are
	 * implicitly correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
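
/*
 * Editor's sketch (not in the original source): a typical caller spins
 * on the lock bit and pairs try_lock_hpte() with __unlock_hpte(), e.g.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	... examine or update the HPTE ...
 *	__unlock_hpte(hptep, v);
 *
 * unlock_hpte() additionally issues a release barrier so that prior
 * stores to the HPTE are visible before the lock bit clears.
 */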
109
Paul Mackerras8dc6cca2017-09-11 15:29:45 +1000110/*
111 * These functions encode knowledge of the POWER7/8/9 hardware
112 * interpretations of the HPTE LP (large page size) field.
113 */
114static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
115{
116 unsigned int lphi;
117
118 if (!(h & HPTE_V_LARGE))
119 return 12; /* 4kB */
120 lphi = (l >> 16) & 0xf;
121 switch ((l >> 12) & 0xf) {
122 case 0:
123 return !lphi ? 24 : -1; /* 16MB */
124 break;
125 case 1:
126 return 16; /* 64kB */
127 break;
128 case 3:
129 return !lphi ? 34 : -1; /* 16GB */
130 break;
131 case 7:
132 return (16 << 8) + 12; /* 64kB in 4kB */
133 break;
134 case 8:
135 if (!lphi)
136 return (24 << 8) + 16; /* 16MB in 64kkB */
137 if (lphi == 3)
138 return (24 << 8) + 12; /* 16MB in 4kB */
139 break;
140 }
141 return -1;
142}
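
/*
 * Editor's example (not in the original source): the return value packs
 * the actual page shift into the high byte when base and actual sizes
 * differ.  A 16MB page mapped in a 64kB base-page segment has
 * HPTE_V_LARGE set, LP bits 12..15 equal to 8 and lphi == 0, so the
 * function returns (24 << 8) + 16 = 0x1810: base shift 16 (64kB),
 * actual shift 24 (16MB).
 */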

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	return 1ul << kvmppc_hpte_actual_page_shift(v, r);
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
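
/*
 * Editor's note: this is the inverse of kvmppc_hpte_page_shifts() above;
 * e.g. kvmppc_pgsize_lp_encoding(16, 24) == 8 matches the "16MB in 64kB"
 * case decoded there.
 */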

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of the va.
	 * v has its top two bits covering segment size, hence move
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we must
	 * collect 11 extra bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared; we need to derive
	 * them from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid we therefore
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift == 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based
		 * on the actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field 58..77 - base_page_shift bits of va.
		 * We have space for bits 58..64; missing bits should
		 * be zero filled.  The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}
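
/*
 * Editor's sketch (an assumption, not from the original source): the
 * result is used as the RB operand of a tlbie that invalidates the
 * guest mapping, along the lines of
 *
 *	rb = compute_tlbie_rb(v, r, pte_index);
 *	asm volatile("ptesync; tlbie %0,%1; eieio; tlbsync; ptesync"
 *		     : : "r" (rb), "r" (lpid) : "memory");
 *
 * where lpid identifies the guest partition; the exact tlbie form
 * varies by CPU generation.
 */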

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
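
/*
 * Editor's note: the PP_* constants come from mmu-hash.h; the two
 * letter pairs in each name give the supervisor and user permissions
 * respectively (e.g. PP_RWRX = supervisor read/write, user read-only),
 * which is why making an HPTE read-only above means forcing the
 * encoding to PP_RXRX or PP_RXXX.
 */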

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure hptel is
	 * cache-inhibited as well.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then update the
		 * pte atomically.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return a zero PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
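
/*
 * Editor's example: the 5-bit storage key is reassembled from the
 * KEY_HI and KEY_LO fields of the second HPTE doubleword, then used
 * to index the AMR, which holds 32 two-bit fields starting at the
 * most significant end; key 0 therefore reads (amr >> 62) & 3.
 */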

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
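
/*
 * Editor's note: lock_rmap() is a test-and-test-and-set spinlock: it
 * spins on a plain read until the bit looks clear and only then
 * attempts the atomic set, which keeps the cache line shared while
 * contended.  Callers bracket rmap chain updates with
 * lock_rmap(rmap); ... unlock_rmap(rmap);
 */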

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
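
/*
 * Editor's example: with 4kB host pages and a 16MB large page size,
 * mask is (16MB >> 12) - 1 = 0xfff, so both the slot's base gfn and
 * its length must be multiples of 4096 small pages.
 */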

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
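
/*
 * Editor's example: slb_pgsize_encoding(0x1000) == 0 (4kB pages),
 * slb_pgsize_encoding(0x10000) == SLB_VSID_L | SLB_VSID_LP_01 (64kB),
 * and slb_pgsize_encoding(0x1000000) == SLB_VSID_L (16MB).
 */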

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}
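
/*
 * Editor's example: for the default order of 24 (a 16MB HPT),
 * kvmppc_hpt_npte() gives 2^20 HPTEs and kvmppc_hpt_mask() gives
 * 2^17 - 1 = 0x1ffff, the mask for indexing the 2^17 HPTE groups.
 */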

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */