/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
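
/*
 * Usage sketch (illustrative only): the shadow vcpu lives in the PACA,
 * so it is only valid while preemption is disabled, and accesses are
 * bracketed as
 *
 *	svcpu = svcpu_get(vcpu);
 *	...read or write svcpu fields...
 *	svcpu_put(svcpu);
 */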

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
extern unsigned long kvm_rma_pages;
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"	/* load HPTE dword 0, reserved */
		     "	and.	%1,%0,%3\n"	/* any of the bits already set? */
		     "	bne	2f\n"		/* yes: fail with old != 0 */
		     "	ori	%0,%0,%4\n"	/* set HPTE_V_HVLOCK */
		     "	stdcx.	%0,0,%2\n"	/* store iff reservation held */
		     "	beq+	2f\n"		/* store succeeded, old == 0 */
		     "	mr	%1,%3\n"	/* lost reservation: fail */
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
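
/*
 * Usage sketch (illustrative, hypothetical caller): since a failed
 * attempt simply returns false, callers spin until the lock bit is
 * obtained, e.g.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	...update the HPTE...
 *	hptep[0] &= ~HPTE_V_HVLOCK;	// drop the lock again
 */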

/*
 * Given the LP field of an HPTE and a candidate base page size, return
 * the index of the matching actual page size, or -1 if the LP value
 * does not decode to any actual size for that base size.
 */
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* skip sizes with no valid LP encoding for this base size */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *		PTE LP	  actual page size
		 *	rrrr rrrz	>=8KB
		 *	rrrr rrzz	>=16KB
		 *	rrrr rzzz	>=32KB
		 *	rrrr zzzz	>=64KB
		 *	.......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
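
/*
 * Worked example (illustrative; the real penc values come from the
 * boot-time mmu_psize_defs[] tables): with a 64K base page size, the
 * 16M candidate's mask is wider than LP_BITS, so the full 8-bit lp
 * value is compared against mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M],
 * and a match makes __hpte_actual_psize(lp, MMU_PAGE_64K) return
 * MMU_PAGE_16M.
 */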

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of the VA.
	 * The top two bits of v cover the segment size, so shift left
	 * by 16 bits and also clear the lower HPTE_V_AVPN_SHIFT (7)
	 * bits.  The AVA field in v also has the lower 23 VA bits
	 * dropped.
	 * For a 4K base page size we need VA bits 14..65 (so we have to
	 * collect an extra 11 bits); for the other sizes we need bits
	 * 14..14+i.
	 */
	/* This covers VA bits 14..54 */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	/*
	 * The AVA in v has the lower 23 VA bits cleared; derive them
	 * from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the VPN bits from va_low by reversing the hash.
	 * v holds the VA with 23 bits dropped and then shifted left by
	 * HPTE_V_AVPN_SHIFT (7) bits, so to recover the VSID we shift
	 * right by SID_SHIFT - (23 - 7).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
		rb |= sllp << 5;		/* AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * The remaining 7 bits of the AVA/LP fields; these also
		 * contain the rr bits of LP.
		 */
		rb |= (va_low & 0x7f) << 16;
		/*
		 * Clear the LP bits that are not needed for the actual
		 * page size.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field takes bits 58..77 - base_page_shift of
		 * the VA; we only have room for bits 58..64, and the
		 * missing bits are zero-filled.  The +1 accounts for
		 * the L bit shift.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;			/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;		/* LP field */
		break;
	}
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
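
/*
 * Usage sketch (illustrative, hypothetical caller): the return value is
 * the RB operand for a tlbie targeting this HPTE, roughly
 *
 *	rb = compute_tlbie_rb(hpte_v, hpte_r, pte_index);
 *	asm volatile("ptesync");
 *	asm volatile(PPC_TLBIE(%1, %0) : : "r" (rb), "r" (lpid));
 *	asm volatile("eieio; tlbsync; ptesync");
 *
 * which mirrors the invalidation sequence the HV real-mode MMU code
 * uses when it evicts a guest HPTE.
 */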

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	int size, a_psize;
	/* Look at the 8 bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* a 4K page has HPTE_V_LARGE clear; otherwise decode the LP field */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;
	else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, size);
			if (a_psize != -1)
				return 1ul << mmu_psize_defs[a_psize].shift;
		}
	}
	return 0;
}
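
/*
 * A return value of 0 means the LP field did not decode against any
 * supported base/actual size pair; callers treat that as an invalid
 * HPTE (the H_ENTER handler, for example, rejects it).
 */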

/*
 * Return the real page number (in units of PAGE_SIZE) from the second
 * HPTE doubleword, masking off the offset bits within a large page.
 */
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO (strong access ordering): W=I=M=1 on ISA 2.06 CPUs */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
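
/*
 * Usage sketch (illustrative): ordinary RAM passes io_type == 0, so
 * only WIMG == M is accepted; I/O mappings pass the cache-control bits
 * they require, e.g.
 *
 *	if (!hpte_cache_flags_ok(ptel, is_io))
 *		return H_PARAMETER;
 *
 * where is_io is a hypothetical variable holding e.g. HPTE_R_I for a
 * cache-inhibited I/O page.
 */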
256
Paul Mackerras342d3db2011-12-12 12:38:05 +0000257/*
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530258 * If it's present and writable, atomically set dirty and referenced bits and
259 * return the PTE, otherwise return 0. If we find a transparent hugepage
260 * and if it is marked splitting we return 0;
Paul Mackerras342d3db2011-12-12 12:38:05 +0000261 */
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530262static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
263 unsigned int hugepage)
Paul Mackerras342d3db2011-12-12 12:38:05 +0000264{
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530265 pte_t old_pte, new_pte = __pte(0);
Paul Mackerras342d3db2011-12-12 12:38:05 +0000266
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530267 while (1) {
268 old_pte = pte_val(*ptep);
269 /*
270 * wait until _PAGE_BUSY is clear then set it atomically
271 */
272 if (unlikely(old_pte & _PAGE_BUSY)) {
273 cpu_relax();
274 continue;
275 }
276#ifdef CONFIG_TRANSPARENT_HUGEPAGE
277 /* If hugepage and is trans splitting return None */
278 if (unlikely(hugepage &&
279 pmd_trans_splitting(pte_pmd(old_pte))))
280 return __pte(0);
281#endif
282 /* If pte is not present return None */
283 if (unlikely(!(old_pte & _PAGE_PRESENT)))
284 return __pte(0);
Paul Mackerras342d3db2011-12-12 12:38:05 +0000285
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530286 new_pte = pte_mkyoung(old_pte);
287 if (writing && pte_write(old_pte))
288 new_pte = pte_mkdirty(new_pte);
289
290 if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
291 new_pte))
292 break;
Paul Mackerras342d3db2011-12-12 12:38:05 +0000293 }
Aneesh Kumar K.Vdb7cb5b2013-06-20 14:30:19 +0530294 return new_pte;
Paul Mackerras342d3db2011-12-12 12:38:05 +0000295}
/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return 1;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
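
/*
 * Worked example (derived from the shifts above): storage key 0
 * occupies the two most-significant AMR bits, so hpte_get_skey_perm()
 * returns (amr >> 62) & 3 for it, while key 31 maps to the two
 * low-order bits.  In the returned pair, the HV MMU fault path treats
 * bit 0 as "deny read" and bit 1 as "deny write".
 */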

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
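
/*
 * Usage sketch (illustrative): the lock bit serializes updates to the
 * rmap chain for a guest page while HPTEs are added or removed, e.g.
 *
 *	lock_rmap(rmap);
 *	...walk or modify the rmap chain...
 *	unlock_rmap(rmap);
 *
 * lock_rmap() spins, so callers must not sleep while holding the lock.
 */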

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return 1;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
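
/*
 * Worked examples: slb_pgsize_encoding(0x1000) == 0 (4K, L = 0),
 * slb_pgsize_encoding(0x10000) == SLB_VSID_L | SLB_VSID_LP_01 (64K),
 * and slb_pgsize_encoding(0x1000000) == SLB_VSID_L (16M, LP = 00).
 */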

/*
 * Match HPTEs that map the VRMA: a 1TB-segment entry whose AVPN
 * carries VRMA_VSID.
 */
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots);
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */