/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
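
/*
 * Worked example (assuming VA_BITS == 48, purely illustrative): the
 * kernel linear map starts at PAGE_OFFSET == 0xffff800000000000, and
 * HYP_PAGE_OFFSET_MASK == 0x0000ffffffffffff. Masking clears the top
 * bits, so the same region appears at 0x0000800000000000 in the HYP
 * VA space, at a fixed offset from its kernel address.
 */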

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm
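
/*
 * Typical use (illustrative): "kern_hyp_va x0" rewrites the kernel VA
 * held in x0 in place. On VHE systems the alternatives framework
 * patches the "and" into a "nop", since kernel VAs are already valid
 * at EL2 there.
 */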

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
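
/*
 * For linear-map addresses this is the C-side equivalent of the
 * kern_hyp_va masking above: subtracting PAGE_OFFSET and adding
 * HYP_PAGE_OFFSET (PAGE_OFFSET with its top bits cleared) yields the
 * same result as masking with HYP_PAGE_OFFSET_MASK.
 */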

/*
 * We currently only support a 40bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
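
/* A 40bit IPA gives each guest 2^40 bytes (1TB) of physical address space. */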

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

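/*
 * Note: the permission update below is an atomic ldxr/stxr
 * read-modify-write rather than a plain load/store, presumably so
 * that a concurrent hardware update of the PTE (e.g. ARMv8.1
 * hardware management of the access/dirty bits) is not lost.
 */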
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

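/*
 * A page-table page is considered empty when only the allocator's own
 * reference to it remains: the table code takes a page reference for
 * every entry it installs, so page_count() == 1 means no entries are
 * left (assumed convention, mirroring the 32bit ARM implementation).
 */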
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

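/*
 * SCTLR_EL1 bit 0 is M (MMU enable) and bit 2 is C (data cache
 * enable); the guest's caches only count as enabled when both are
 * set, hence the 0b101 mask below.
 */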
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

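/*
 * Make a page coherent before it is handed to the guest: flush the
 * data cache to the point of coherency if the guest may access memory
 * with its caches disabled, then handle the icache by type (PIPT
 * caches can be invalidated by range, any kind of VIPT cache must be
 * invalidated entirely, and ASID-tagged VIVT icaches are left alone
 * here).
 */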
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

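/*
 * An extended idmap is used when the physical address of the
 * idmap'ed HYP init code cannot be expressed in VA_BITS, which
 * requires an extra top-level translation level. This helper builds
 * the merged top-level table covering both the runtime HYP mappings
 * and the boot idmap.
 */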
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

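/*
 * ID_AA64MMFR1_EL1.VMIDBits encodes the supported VMID width: 0 means
 * 8bit VMIDs, 2 means the ARMv8.1 16bit VMID (VMID16) extension.
 */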
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */