/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
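/*
 * For example, with 4K pages and VA_BITS == 39, PAGE_OFFSET is
 * 0xffffffc000000000 and HYP_PAGE_OFFSET_MASK is 0x0000007fffffffff,
 * so the kernel VA 0xffffffc000100000 becomes the HYP VA
 * 0x0000004000100000.
 */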

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm

#else

#include <asm/cachetype.h>
#include <asm/cacheflush.h>

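/*
 * C-level counterpart of the kern_hyp_va macro above: as
 * HYP_PAGE_OFFSET is simply PAGE_OFFSET with its top bits masked off,
 * subtracting one and adding the other amounts to masking the top
 * bits of a linear-map kernel VA.
 */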
#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

/* Make sure we get the right size, and thus the right alignment */
#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
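/*
 * For example, with 4K pages and VA_BITS == 39, PGDIR_SHIFT is 30, so
 * covering a 40-bit IPA takes 1 << (40 - 30) = 1024 PGD entries: 8K
 * of tables, which get_order() turns into an order-1 (two-page)
 * allocation.
 */

/*
 * HYP mapping helpers, implemented in the MMU code shared with 32-bit
 * ARM KVM: create_hyp_mappings() maps a kernel VA range at its
 * KERN_TO_HYP() address, create_hyp_io_mappings() does the same for a
 * physical (device) range, and the free_*() calls tear the HYP page
 * tables down again.
 */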
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

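/*
 * Stage-2 page table management: each VM gets its own stage-2 PGD;
 * kvm_phys_addr_ioremap() installs device mappings (e.g. the GIC
 * virtual CPU interface) directly into the guest's IPA space.
 */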
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size);

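/*
 * Entry point for stage-2 fault handling: maps the faulting IPA if it
 * backs onto memory, or forwards the access for MMIO emulation.
 */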
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

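/*
 * HYP bootstrap interface: the physical addresses of the runtime and
 * boot HYP page tables (installed in TTBR0_EL2) and of the idmap'd
 * vector used while the EL2 MMU is being enabled.
 */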
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

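/*
 * No-ops on arm64: table walks are coherent with the data caches, so
 * page table updates need no explicit cache maintenance here (unlike
 * on some 32-bit ARM cores, where entries must be cleaned from the
 * D-cache).
 */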
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

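/*
 * Make a stage-2 descriptor writable by setting both stage-2 access
 * permission (S2AP) bits in the PTE/PMD.
 */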
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

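/*
 * The generic addr_end helpers are usable as-is: on arm64 an unsigned
 * long is 64 bits wide, comfortably covering the 40-bit IPA space
 * (32-bit ARM needs dedicated versions of these for the same walks).
 */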
#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

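/*
 * Page table pages start life with a refcount of 1; the mapping code
 * takes a get_page() reference for every entry it installs and drops
 * it when the entry is cleared. A count of 1 therefore means the
 * table holds no valid entries and can be freed.
 */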
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

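/*
 * Levels that the kernel folds away can never be freed: with 64K
 * pages there are only two translation levels, so the "PMD" is really
 * the PGD, and the PUD is folded in all configurations supported
 * here; hence the constant 0 for those cases.
 */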
#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
#ifndef CONFIG_ARM64_64K_PAGES
#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#else
#define kvm_pmd_table_empty(pmdp) (0)
#endif
#define kvm_pud_table_empty(pudp) (0)


struct kvm;

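/* Clean and invalidate a range to the Point of Coherency. */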
#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

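/*
 * 0b101 covers SCTLR_EL1.M (bit 0, stage-1 MMU enable) and
 * SCTLR_EL1.C (bit 2, D-cache enable): true only if the guest runs
 * with both switched on.
 */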
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

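/*
 * Make a page handed to the guest coherent: if the guest runs with
 * caches off, clean the data to the PoC so uncached accesses see it;
 * then invalidate the I-cache as its indexing scheme requires before
 * the guest may execute from the page.
 */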
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size)
{
	if (!vcpu_has_cache_enabled(vcpu))
		kvm_flush_dcache_to_poc((void *)hva, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range(hva, hva + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

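/*
 * Clean the data cache for everything the VM maps at stage 2; used
 * when the guest's cache state changes to keep memory consistent.
 */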
void stage2_flush_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */