/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
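
/*
 * As a worked example (illustrative values, not from the original
 * header): with a 48-bit VA kernel, the masks above evaluate to
 *
 *	HYP_PAGE_OFFSET_HIGH_MASK = 0x0000ffffffffffff
 *	HYP_PAGE_OFFSET_LOW_MASK  = 0x00007fffffffffff
 *
 * so a kernel VA such as 0xffff800008001000 becomes
 * 0x0000800008001000 under the high mask, and 0x0000000008001000 if
 * the low mask is applied on top of it.
 */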

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the low mask is a strict
 * subset of the high mask, so the first AND is redundant but
 * harmless. The extra instruction should be completely invisible on
 * any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm
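
/*
 * Example use (illustrative, not part of the original header):
 * convert the kernel VA held in x0 in place before it is consumed
 * at EL2:
 *
 *	kern_hyp_va	x0
 */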

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
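
/*
 * Example use (illustrative, not part of the original header): a
 * host pointer has to be converted before the non-VHE hypervisor
 * can dereference it:
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 */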

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
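
/*
 * Example use (illustrative; __hyp_example_counter is a hypothetical
 * u64 object mapped at EL2, not a symbol from this tree):
 *
 *	u64 *p = hyp_symbol_addr(__hyp_example_counter);
 */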

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

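/*
 * Downgrade a stage-2 PTE to read-only. The update is performed with
 * an exclusive load/store loop so that a concurrent hardware update
 * of the entry is not lost to a plain read-modify-write.
 */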
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

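/*
 * A page-table page is empty once the only reference left on it is
 * the initial allocation one, i.e. every entry it used to contain
 * has been torn down.
 */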
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

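/*
 * The guest has its caches on when both SCTLR_EL1.M (bit 0, MMU
 * enable) and SCTLR_EL1.C (bit 2, data cache enable) are set, hence
 * the 0b101 mask below.
 */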
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

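/*
 * ID_AA64MMFR1_EL1.VMIDBits is 2 when the CPU implements 16-bit
 * VMIDs, and 0 when only the architectural minimum of 8 bits is
 * available.
 */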
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu.h>

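/*
 * Pick the exception vectors to run at EL2: when this CPU needs a
 * branch predictor hardening callback, use the 2kB hardened vector
 * slot assigned to it; on non-VHE the slot is referenced through its
 * linear alias, as that is what ends up mapped at HYP.
 */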
static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kvm_ksym_ref(__kvm_hyp_vector);

	if (data->fn) {
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

		if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
			vect = lm_alias(vect);
	}

	return vect;
}

static inline int kvm_map_vectors(void)
{
	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
				   PAGE_HYP_EXEC);
}

#else
static inline void *kvm_get_hyp_vector(void)
{
	return kvm_ksym_ref(__kvm_hyp_vector);
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

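/*
 * Map each CPU's arm64_ssbd_callback_required flag into the HYP
 * address space, so that the hypervisor can check it directly when
 * applying the SSBD workaround on guest entry/exit.
 */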
static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */