/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we don't
	 * need to worry about THP collapse/split.  This is called only
	 * in real mode with MSR_EE = 0, hence we won't need
	 * irq_save/restore around the page-table walk.
	 */
	p = find_init_mm_pte(addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

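/*
 * The real-mode hcall handlers below run with address translation off,
 * where only linear-mapping (__va) addresses are usable;
 * real_vmalloc_addr() is how they reach vmalloc'd structures such as
 * the reverse-map array.
 */
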
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
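/*
 * tlbie broadcasts the invalidation to every processor, while tlbiel only
 * invalidates the local core's TLB; when we use the cheaper tlbiel, every
 * other physical core is flagged in need_tlb_flush so that it flushes
 * this LPID's translations before it next runs a vcpu of this guest.
 */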
static int global_invalidates(struct kvm *kvm)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
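/*
 * The chain is a circular doubly-linked list of HPTE indices threaded
 * through the forw/back fields of the revmap entries; the rmap word for
 * the guest page holds the index of the head entry.
 */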
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Update the dirty bitmap of a memslot */
void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned long psize)
{
	unsigned long npages;

	if (!psize || !memslot->dirty_bitmap)
		return;
	npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
	gfn -= memslot->base_gfn;
	set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
}
EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map);

static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
			unsigned long hpte_v, unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long psize;

	psize = kvmppc_actual_pgsz(hpte_v, hpte_gr);
	gfn = hpte_rpn(hpte_gr, psize);
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslot && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, psize);
}

/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr,
				      struct kvm_memory_slot **memslotp,
				      unsigned long *gfnp)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslotp)
		*memslotp = memslot;
	if (gfnp)
		*gfnp = gfn;
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;
	struct kvm_memory_slot *memslot;
	unsigned long gfn;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_dirty_map(memslot, gfn,
					kvmppc_actual_pgsz(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = kvmppc_actual_pgsz(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

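	/*
	 * If a host MMU-notifier invalidation runs between this point and
	 * the mmu_notifier_retry() check taken under the rmap lock below,
	 * the sequence number changes and we insert a non-present HPTE
	 * instead of a valid one.
	 */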
	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If we had a page table change after lookup, we would
	 * retry via mmu_notifier_retry.
	 */
	if (!realmode)
		local_irq_save(irq_flags);
	/*
	 * If called in real mode we have MSR_EE = 0. Otherwise
	 * we disable irqs above.
	 */
	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host page
		 * size, even if the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we had a host pte mapping, check the WIMG bits */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

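	/*
	 * The hash table is organised as groups (HPTEGs) of 8 HPTEs, each
	 * HPTE being two 64-bit dwords (16 bytes); pte_index << 4 below
	 * converts an HPTE index to a byte offset, and hpte += 2 steps
	 * from one HPTE to the next.
	 */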
	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);
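	/*
	 * The eieio below orders the store to hpte[1] above before the
	 * store to hpte[0] in __unlock_hpte(), so no page-table walk can
	 * see the valid bit set while the second dword is still stale.
	 */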

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true,
				 &vcpu->arch.regs.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

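/*
 * Emulated MMIO pages are entered in the HPT as HPTE_V_ABSENT with
 * storage key 31 (both key bits set); see the "key=31" marking in
 * kvmppc_do_h_enter() above.
 */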
static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}

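/*
 * Each entry of rbvalues[] is a pre-computed RB register value for the
 * tlbie/tlbiel instruction (see compute_tlbie_rb()), encoding the
 * virtual address and page size of one HPTE being invalidated.
 */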
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		}

		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
			/*
			 * Need the extra ptesync to make sure we don't
			 * re-order the tlbie
			 */
			asm volatile("ptesync": : :"memory");
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[0]), "r" (kvm->arch.lpid));
		}

		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}
}

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.regs.gpr[4]);
}

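/*
 * H_BULK_REMOVE passes up to four remove requests in r4-r11, two
 * registers per request.  The top byte of the first register of each
 * pair encodes the request type and match flags; on return it carries
 * a status (0x80 = success, 0x90 = not found, 0xa0 = parameter error)
 * with the removed mapping's R and C bits in bits 3:2 of that byte.
 */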
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.regs.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
					    be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.regs.gpr[4 + i * 2] = v;
		vcpu->arch.regs.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			kvmppc_set_dirty_from_hpte(kvm, v, gr);
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

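/*
 * Base page shift for each value of the LP field of a large-page SLB
 * entry; indexed as slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4]
 * (see kvmppc_hv_find_lock_hpte() below).
 */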
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

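/*
 * Per-vcpu cache of recent emulated-MMIO translations, so that repeated
 * accesses to an emulated device page need not re-search the HPT.
 * Entries are invalidated implicitly: kvm->arch.mmio_update is bumped
 * whenever a possibly-MMIO HPTE is removed or changed, so stale
 * generation numbers no longer match in mmio_cache_search().
 */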
static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}

static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}

/*
 * When called from virtual mode, this function must be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK across a
 * preemption can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    kvmppc_hpte_base_page_shift(v, r) == pshift)
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
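
/*
 * The search above scans the primary hash bucket first; if no match is
 * found there, it is repeated on the secondary bucket
 * (hash ^ kvmppc_hpt_mask()) with HPTE_V_SECONDARY set in the match value.
 */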

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * none of these (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;	/* send fault up to host kernel mode */
}