/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
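/*
 * Worked example (a sketch, assuming VA_BITS == 39, where the kernel's
 * PAGE_OFFSET is 0xffffffc000000000): HYP_PAGE_OFFSET_MASK is then
 * 0x0000007fffffffff, so masking a kernel VA clears its sign-extension
 * bits, and HYP_PAGE_OFFSET becomes 0x0000004000000000. Every kernel
 * linear-map address thus has a HYP alias at a fixed offset from it.
 */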

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
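/*
 * For instance (illustrative, again assuming VA_BITS == 39), this puts
 * the trampoline at 0x0000007ffffff000: the last page of the HYP VA
 * range.
 */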

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level of tables less than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif
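/*
 * A worked example (a sketch, assuming the default 40-bit IPA): with 4K
 * pages a stage-2 mapping may require both a PMD and a PTE table below
 * the pre-allocated level(s), hence 2 pages; with 64K pages at most one
 * intermediate table can be missing, hence 1.
 */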

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
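/*
 * Usage sketch (illustrative): convert the kernel VA held in x0 into
 * its HYP alias in place:
 *
 *	kern_hyp_va	x0
 */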

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

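/*
 * Since HYP_PAGE_OFFSET == PAGE_OFFSET & HYP_PAGE_OFFSET_MASK, this is
 * equivalent to masking the kernel VA, e.g. (a sketch, VA_BITS == 39):
 * KERN_TO_HYP(0xffffffc000100000) == 0x0000004000100000.
 */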
/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

/*
 * The kvm_clean_* helpers are no-ops here: the arm64 page-table walker
 * is coherent with the data caches, so tables never need cleaning to
 * the PoC (unlike on some 32-bit ARM cores).
 */
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}

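/*
 * Usage sketch (illustrative): the read-only helpers support
 * write-protection of stage-2 mappings, e.g. for dirty-page logging:
 *
 *	kvm_set_s2pmd_readonly(pmd);
 *	WARN_ON(!kvm_s2pmd_readonly(pmd));
 */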
#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single
 * pgd entry can address the entire IPA input range. Note that in this
 * case, the pgd is actually not used by the MMU for Stage-2
 * translations, but is merely a fake pgd used as a data structure for
 * the kernel pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)

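/*
 * Worked example (a sketch): with 4K pages and a 48-bit kernel VA,
 * PGDIR_SHIFT == 39, so PTRS_PER_S2_PGD == 1 << (40 - 39) == 2; with a
 * 39-bit kernel VA, PGDIR_SHIFT == 30 and PTRS_PER_S2_PGD == 1024.
 */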
#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

/*
 * If we are concatenating first level stage-2 page tables, we would have
 * at most 16 pointers in the fake PGD, because that's what the
 * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the next
 * level (which uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif

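/*
 * For example (illustrative): with 4K pages and CONFIG_PGTABLE_LEVELS == 4,
 * PTRS_PER_S2_PGD == 2, so KVM_PREALLOC_LEVEL == 1 and the fake PGD plus
 * the level-1 (PUD) tables are pre-allocated; with 3 levels and
 * PTRS_PER_S2_PGD == 1024, nothing beyond the real PGD is pre-allocated.
 */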
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}
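/*
 * Rationale (informal sketch): with a pre-allocated level, each fake PGD
 * entry points to one page of concatenated next-level tables, so the
 * table the hardware actually walks spans PTRS_PER_S2_PGD whole pages;
 * otherwise the pgd entries themselves form the hardware table.
 */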

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
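/*
 * This relies on the stage-2 code holding a page reference for every
 * live entry in a table page, so a count of 1 means only the original
 * allocation reference remains and the table is empty.
 */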

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif


struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Check SCTLR_EL1.M (MMU enable) and SCTLR_EL1.C (D-cache enable) */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 0b0010 means 16-bit VMIDs */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */