/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * If we're not using MMU notifiers, we never take pages away
	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
		global = 0;
	else if (kvm->arch.using_mmu_notifiers)
		global = 1;
	else
		global = !(flags & H_LOCAL);

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

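/*
 * Look up the Linux PTE for a host virtual address.  On entry *pte_sizep
 * is the minimum page size the caller needs; on exit it is the actual
 * host page size backing the address.  Returns __pte(0) if no PTE is
 * found or the backing page is smaller than required.
 */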
static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
					 int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int hugepage_shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
	if (!ptep)
		return __pte(0);
	if (hugepage_shift)
		*pte_sizep = 1ul << hugepage_shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}

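/*
 * Store a new first HPTE dword after a release barrier, dropping the
 * HPTE_V_HVLOCK bit that try_lock_hpte() set.
 */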
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}

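/*
 * Core of the H_ENTER hypercall, callable in real or virtual mode (as
 * indicated by @realmode): validate the guest-supplied HPTE, translate
 * the guest real address to a host physical address, pick and lock a
 * slot in the hashed page table, link the entry into the reverse-map
 * chain and finally make it valid.  The index of the new HPTE is
 * returned via *pte_idx_ret.
 */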
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = __gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
						  &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}

	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

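/*
 * Try once to acquire the per-VM tlbie lock with lwarx/stwcx.;
 * returns 1 on success, 0 if the lock was already held.  Does not spin.
 */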
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

/*
 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 * processors such as POWER7; the large page bit is in the instruction
 * not RB, and the top 16 bits and the bottom 12 bits of the VA
 * in RB must be 0.
 */
static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
			  long npages, int global, bool need_sync)
{
	long i;

	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbie %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbie %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbiel %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbiel %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("ptesync" : : : "memory");
	}
}

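/*
 * Invalidate the translations described by rbvalues[0..npages-1], either
 * globally (tlbie under the tlbie_lock) or on this CPU only (tlbiel),
 * with the required ptesync/eieio/tlbsync sequences around them.
 */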
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970 tlbie instruction is a bit different */
		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
		return;
	}
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

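/*
 * Core of the H_REMOVE hypercall: lock the target HPTE, check the
 * AVPN/ANDCOND conditions, invalidate the entry and its TLB entry,
 * unchain it from the reverse map and return the old first and second
 * dwords via hpret[].
 */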
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

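/*
 * H_BULK_REMOVE: process up to four remove requests taken from the
 * guest's parameter registers (vcpu->arch.gpr[4..11]), batching the TLB
 * invalidations.  The per-request return codes are written back into the
 * same registers.
 */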
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

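/*
 * H_PROTECT: update the protection bits (pp0/pp, N, key) of an existing
 * HPTE, flushing the old translation if the entry was valid and reducing
 * the entry to read-only if the backing host page is mapped read-only.
 */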
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * If the host has this page as readonly but the guest
		 * wants to make it read/write, reduce the permissions.
		 * Checking the host permissions involves finding the
		 * memslot and then the Linux PTE for the page.
		 */
		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
			unsigned long psize, gfn, hva;
			struct kvm_memory_slot *memslot;
			pgd_t *pgdir = vcpu->arch.pgdir;
			pte_t pte;

			psize = hpte_page_size(v, r);
			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
			memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
			if (memslot) {
				hva = __gfn_to_hva_memslot(memslot, gfn);
				pte = lookup_linux_pte_and_update(pgdir, hva,
								  1, &psize);
				if (pte_present(pte) && !pte_write(pte))
					r = hpte_make_readonly(r);
			}
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

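/*
 * H_READ: return the guest view of one HPTE (or four, with H_READ_4) in
 * vcpu->arch.gpr[4] onwards, presenting absent (paged-out) entries as
 * valid and substituting the saved guest RPTE for the second dword.
 */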
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

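/*
 * Clear the valid bit of the given HPTE and flush the stale translation
 * from the TLB on all cores.
 */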
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

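/*
 * Clear the reference (R) bit of an HPTE by rewriting only the byte that
 * contains it, without taking the HPTE lock, then invalidate the TLB
 * entry so that the bit is set again on the next access.
 */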
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

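/*
 * Search the hashed page table for an entry matching the given effective
 * address and SLB value, trying the primary and then the secondary hash
 * bucket.  On success the HPTE is left locked (HPTE_V_HVLOCK set) and its
 * index is returned; returns -1 if no matching entry is found.
 */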
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}