/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)

/* Temporary compat define */
#define HYP_PAGE_OFFSET_MASK		HYP_PAGE_OFFSET_HIGH_MASK
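
/*
 * Worked example (illustrative only, assuming a 4k pages, 39-bit VA
 * kernel, i.e. VA_BITS == 39, and the example address below is
 * hypothetical):
 *
 *	HYP_PAGE_OFFSET_HIGH_MASK = (1UL << 39) - 1 = 0x0000007fffffffff
 *	HYP_PAGE_OFFSET_LOW_MASK  = (1UL << 38) - 1 = 0x0000003fffffffff
 *
 * A linear-map address such as 0xffffffc000001000 masked with the
 * high mask yields 0x0000004000001000 (bit 38 set, i.e. the top half
 * of the 39-bit HYP range); masking it further with the low mask
 * clears bit 38 and moves it to the bottom half, 0x0000000000001000.
 */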

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the low mask is a strict
 * subset of the "high mask", so the first mask is redundant but
 * harmless. It should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else
	nop
alternative_endif
alternative_if_not ARM64_HYP_OFFSET_LOW
	nop
alternative_else
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_endif
.endm
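
/*
 * Illustrative use from EL2 assembly (a sketch, not code taken from
 * this file): convert a kernel pointer held in x0 into its HYP alias
 * before dereferencing it at EL2:
 *
 *	ldr	x0, [x1]	// x1 points at a kernel VA
 *	kern_hyp_va x0		// x0 now holds the HYP VA
 *	ldr	x2, [x0]	// access via the HYP mapping
 */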

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	(typeof(v))(__kern_hyp_va((unsigned long)(v)))
#define KERN_TO_HYP(v)	kern_hyp_va(v)
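
/*
 * Illustrative use (a sketch, not code from KVM itself): translating
 * a hypothetical kernel pointer into its HYP alias before handing it
 * to code running at EL2:
 *
 *	struct foo *hyp_ptr = kern_hyp_va(kernel_ptr);
 */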

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("// kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

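/*
 * A page-table page is considered empty when only the reference taken
 * at allocation time remains: the mapping code takes an extra page
 * reference for each entry it installs and drops it when the entry is
 * cleared.
 */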
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
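	/* 0b101 checks SCTLR_EL1.M (bit 0, MMU) and SCTLR_EL1.C (bit 2, D-cache) */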
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
					pgd_t *hyp_pgd,
					pgd_t *merged_hyp_pgd,
					unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

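	/* ID_AA64MMFR1_EL1.VMIDBits == 0b0010 means 16-bit VMIDs are implemented */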
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */