/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all the data we apply to the PTE, we are
	 * implicitly correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}
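
/*
 * try_lock_hpte() returns 1 if none of @bits were set in the first HPTE
 * doubleword and it managed to atomically set HPTE_V_HVLOCK; it returns
 * 0 otherwise (including when the stdcx. reservation is lost).  A
 * hypothetical caller would spin until it gets the lock and later drop
 * it by clearing HPTE_V_HVLOCK again, e.g.:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... inspect or update the HPTE ...
 *	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
 */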
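
/*
 * Decode the LP (large page) field of an HPTE against a candidate base
 * page size @psize: return the MMU page size index of the actual page
 * size whose penc value matches @lp, or -1 if none does.
 */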
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1, ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz	   >= 8KB
		 *    rrrr rrzz	   >= 16KB
		 *    rrrr rzzz	   >= 32KB
		 *    rrrr zzzz	   >= 64KB
		 *    ...
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
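
/*
 * Build the RB operand for a tlbie instruction from the two doublewords
 * of an HPTE (@v, @r) and the index of the PTE within the HPT.  The RB
 * value encodes the AVA, the B (segment size) field and the page-size
 * fields (L/LP, or AP for a 4K base page size) that the hardware
 * expects for the invalidation.
 */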
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of the va.
	 * v has the top two bits covering the segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we must
	 * collect an extra 11 bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/*  B field */
	/*
	 * The AVA in v has its lower 23 bits cleared; we need to derive
	 * them from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hashing.
	 * In v we have the va with 23 bits dropped and then left-shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits.  To recover the vsid we
	 * right-shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
		rb |= sllp << 5;	/*  AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * The remaining bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on
		 * the actual page size.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift)
		 * of the va; we only have space for bits 58..64, and any
		 * missing bits must be zero-filled.  The +1 takes care
		 * of the L bit shift.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
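
/*
 * Return the actual page size of an HPTE in bytes (or, when
 * @is_base_size is set, its base page size), given its two doublewords
 * @h and @l.  Returns 0 if the LP encoding is not recognized.
 */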
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	int size, a_psize;
	/* Look at the 8 bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;
	else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, size);
			if (a_psize != -1) {
				if (is_base_size)
					return 1ul << mmu_psize_defs[size].shift;
				return 1ul << mmu_psize_defs[a_psize].shift;
			}
		}
	}
	return 0;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
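
/*
 * Illustration: for a 64K HPTE mapping a 64K page, both functions
 * return 0x10000.  With MPSS (multiple page sizes per segment), e.g. a
 * 16M actual page in a 64K base-page-size segment, hpte_page_size()
 * would return 16M while hpte_base_page_size() would return 64K.
 */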
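
/*
 * Extract the real page number from the second HPTE doubleword,
 * expressed in units of PAGE_SIZE: mask off the bits below the actual
 * page size @psize, then shift down by PAGE_SHIFT.
 */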
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
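
/*
 * Check that the WIMG bits of an HPTE are compatible with the memory
 * type: ordinary cacheable memory (io_type == 0) must be mapped with
 * just M set, while I/O must match the requested W/I combination.
 * SAO mappings (W|I|M) are treated as ordinary memory on CPUs that
 * support them (CPU_FTR_ARCH_206).
 */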
static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
281
Paul Mackerras342d3db2011-12-12 12:38:05 +0000282/*
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530283 * If it's present and writable, atomically set dirty and referenced bits and
Aneesh Kumar K.V7d6e7f72015-03-30 10:41:04 +0530284 * return the PTE, otherwise return 0.
Paul Mackerras342d3db2011-12-12 12:38:05 +0000285 */
Aneesh Kumar K.V7d6e7f72015-03-30 10:41:04 +0530286static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
Paul Mackerras342d3db2011-12-12 12:38:05 +0000287{
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530288 pte_t old_pte, new_pte = __pte(0);
Paul Mackerras342d3db2011-12-12 12:38:05 +0000289
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530290 while (1) {
Aneesh Kumar K.V5e1d44a2015-03-30 10:39:12 +0530291 /*
292 * Make sure we don't reload from ptep
293 */
294 old_pte = READ_ONCE(*ptep);
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530295 /*
296 * wait until _PAGE_BUSY is clear then set it atomically
297 */
Michael Ellerman4f9c53c2015-03-25 20:11:57 +1100298 if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530299 cpu_relax();
300 continue;
301 }
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530302 /* If pte is not present return None */
Michael Ellerman4f9c53c2015-03-25 20:11:57 +1100303 if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530304 return __pte(0);
Paul Mackerras342d3db2011-12-12 12:38:05 +0000305
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530306 new_pte = pte_mkyoung(old_pte);
307 if (writing && pte_write(old_pte))
308 new_pte = pte_mkdirty(new_pte);
309
Michael Ellerman4f9c53c2015-03-25 20:11:57 +1100310 if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
311 pte_val(old_pte),
312 pte_val(new_pte))) {
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530313 break;
Michael Ellerman4f9c53c2015-03-25 20:11:57 +1100314 }
Paul Mackerras342d3db2011-12-12 12:38:05 +0000315 }
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530316 return new_pte;
Paul Mackerras342d3db2011-12-12 12:38:05 +0000317}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
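
/*
 * Return the two AMR bits that apply to the storage key encoded in the
 * second HPTE doubleword.  Key 0 sits in the top two AMR bits and key
 * 31 in the bottom two, hence the (62 - 2 * skey) shift.
 */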
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
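
/*
 * Sketch of the expected pairing around rmap chain manipulation
 * (illustrative only; the rmap lock is a simple test-and-set bit lock):
 *
 *	lock_rmap(rmap);
 *	... walk or modify the rmap chain ...
 *	unlock_rmap(rmap);
 */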

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
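
/*
 * For example, slb_pgsize_encoding(0x1000) == 0 (4k),
 * slb_pgsize_encoding(0x10000) == SLB_VSID_L | SLB_VSID_LP_01 (64k)
 * and slb_pgsize_encoding(0x1000000) == SLB_VSID_L (16M).
 */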

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots);
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */