/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

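/*
 * WARN_ON_ONCE_RM() is a stand-in for WARN_ON_ONCE() usable from the
 * real-mode hypercall handlers below: it reports via pr_err() and
 * dump_stack() rather than the trap-based WARN machinery, which is
 * presumably why a separate macro exists for real mode.
 */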
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

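/* Number of 64-bit TCE entries that fit in one page of the shadow table */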
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates a TCE.
 * At the moment only the permission flags and the page alignment are
 * validated. As the host kernel does not access those addresses (it merely
 * puts them into the table and user space is supposed to process them), we
 * can skip checking other things (such as whether the TCE is a guest RAM
 * address or whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))). This is pure
 * arithmetic and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64;
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

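/*
 * Converts a guest physical address to the matching host userspace address
 * and, optionally (HV only), returns a pointer to the rmap entry for the
 * guest page so the caller can lock it against unmapping.
 */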
long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
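/*
 * Clears a hardware TCE in real mode by exchanging the entry with an
 * empty (DMA_NONE) one; the previous contents are discarded. Used on
 * error paths where the table state is already suspect.
 */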
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

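/*
 * Looks up the preregistered-memory region covering the userspace address
 * recorded for this TCE, decrements that region's mapped-pages counter and
 * clears the recorded address.
 */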
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

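/*
 * Unmaps a single hardware TCE: exchanges it with an empty entry and,
 * if something was mapped there, drops the reference taken on the
 * preregistered memory region when the entry was mapped.
 */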
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

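/*
 * Unmaps one guest-visible TCE entry, which may span several smaller
 * hardware (IOMMU) pages when the guest page size is larger than the
 * host IOMMU page size.
 */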
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

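/*
 * Maps a single hardware TCE: translates the userspace address through the
 * preregistered-memory region, takes a reference on that region and
 * programs the resulting host physical address into the table.
 */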
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

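/*
 * Maps one guest-visible TCE entry, iterating over the smaller hardware
 * (IOMMU) pages backing it when the guest page size is larger than the
 * host IOMMU page size.
 */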
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

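/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * updates the hardware table of every attached IOMMU group and then
 * mirrors the TCE into the guest-visible table.
 */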
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

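/*
 * Translates a host userspace address to a host physical address by
 * walking the user page tables in real mode. Anything larger than a base
 * page, or not yet accessed, is punted back to the virtual-mode path.
 */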
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and the current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which again lets the page table walk below
	 * finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

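/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: reads a list of up to 512
 * TCEs from guest memory and applies each one as H_PUT_TCE would,
 * falling back to the virtual-mode handler whenever the translation
 * cannot be done safely in real mode.
 */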
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation
		 * does not depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock rmap and do __find_linux_pte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			/* Do not return with the rmap lock still held */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

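/*
 * Real-mode handler for H_STUFF_TCE: writes the same TCE value (which must
 * carry no permission bits) into npages consecutive entries, unmapping the
 * corresponding hardware TCEs first.
 */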
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */