/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define kern_hyp_va(kva)	(kva)

/* Contrary to arm64, there is no need to generate a PC-relative address */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr = &(s);					\
		addr;							\
	})
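
/*
 * A minimal usage sketch (the symbol name is hypothetical, for
 * illustration only):
 *
 *	extern char __some_hyp_object[];
 *	char *p = hyp_symbol_addr(__some_hyp_object);
 *
 * Going through a typeof() pointer keeps the expression's type in sync
 * with the symbol, matching the PC-relative arm64 variant of the macro.
 */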
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels, i.e. the worst-case number of table pages a single stage2
 * mapping may have to allocate, so the MMU page cache is pre-filled to
 * at least this depth.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);
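
/*
 * Sketch of a typical call (the addresses are made-up example values):
 * map one page of MMIO at guest IPA 0x10000000 onto host physical
 * address 0x1c010000, writable:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, 0x10000000, 0x1c010000,
 *				    PAGE_SIZE, true);
 */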

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
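
/*
 * The setters below publish the new entry with a plain store followed
 * by an inner-shareable DSB, so the update is guaranteed to be visible
 * to the stage-2/HYP table walker before any subsequent TLB
 * maintenance is issued.
 */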
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	dsb(ishst);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	dsb(ishst);
}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= L_PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= L_PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}
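
/*
 * Illustrative sketch (not an actual caller in this file): dirty-page
 * logging write-protects a stage2 mapping and later checks whether it
 * has already been made read-only:
 *
 *	if (!kvm_s2pte_readonly(ptep))
 *		kvm_set_s2pte_readonly(ptep);
 */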

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
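
/*
 * The stage2/HYP code takes one page reference for every entry it
 * installs in a table page, on top of the single allocation reference,
 * so a page_count() of 1 means the table is empty and can be freed.
 * There is no real PUD level on 32-bit ARM, hence the pud variants
 * below are hardwired to false.
 */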
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * The guest's caches are usable only when both SCTLR.M (bit 0,
	 * MMU enable) and SCTLR.C (bit 2, D-cache enable) are set,
	 * hence the 0b101 mask.
	 */
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}
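
/*
 * The dcache flush helpers below go through a kernel mapping created
 * with kmap_atomic(), one page at a time, since the backing page may
 * live in highmem and have no permanent kernel virtual address.
 */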
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	/* 32-bit ARM has no stage2 PUD level, so there is nothing to flush. */
}
#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }

static inline unsigned int kvm_get_vmid_bits(void)
{
	/* The VMID field in VTTBR is 8 bits wide on ARMv7. */
	return 8;
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
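
/*
 * Usage sketch (hypothetical caller): read an 8-byte descriptor from
 * guest memory without already holding the SRCU read lock:
 *
 *	u64 desc;
 *	if (kvm_read_guest_lock(kvm, gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 */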
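
/*
 * Cores vulnerable to branch predictor aliasing (Spectre v2) get a
 * vector that invalidates the BTB or the icache on guest exit;
 * everything else keeps the plain vector.
 */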
static inline void *kvm_get_hyp_vector(void)
{
	switch (read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	{
		extern char __kvm_hyp_vector_bp_inv[];
		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
	}

	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A15:
	{
		extern char __kvm_hyp_vector_ic_inv[];
		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
	}
#endif
	default:
	{
		extern char __kvm_hyp_vector[];
		return kvm_ksym_ref(__kvm_hyp_vector);
	}
	}
}

static inline int kvm_map_vectors(void)
{
	/* No extra vector mapping is needed on 32-bit ARM. */
	return 0;
}

static inline int hyp_map_aux_data(void)
{
	/* No auxiliary per-cpu data to map for the 32-bit HYP code. */
	return 0;
}

#endif	/* !__ASSEMBLY__ */

#endif	/* __ARM_KVM_MMU_H__ */