/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}
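
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * callers fill the cache while they may still sleep, then allocate from it
 * with the mmu_lock held, where a failing or sleeping allocation would be
 * unsafe:
 *
 *	ret = mmu_topup_memory_cache(cache, 2, KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	table = mmu_memory_cache_alloc(cache);	// cannot fail at this point
 *	...
 *	spin_unlock(&kvm->mmu_lock);
 *
 * user_mem_abort() and kvm_phys_addr_ioremap() below follow this pattern.
 */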

static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

/*
 * A table page is "empty" when only the initial allocation reference is
 * left on it: every live entry installed in a table takes an extra
 * reference via get_page(), dropped again by the clear_*_entry() helpers.
 */
static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}

static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

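/*
 * Example (an editorial sketch, not in the original source): unmapping a
 * single page clears its PTE and, if that was the last live entry, frees
 * the now-empty PTE table and possibly the PMD table above it:
 *
 *	unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 *
 * The "last entry" test is the page_count() == 1 check in pte_empty() and
 * pmd_empty(): only the allocation reference remains on the table page.
 */
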
/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to: The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}
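
/*
 * Usage sketch (editorial; the call lives in the arch init code, not in
 * this file, and the symbol names come from there): mapping the Hyp-mode
 * code section into the Hyp page tables at its KERN_TO_HYP() address:
 *
 *	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 *	if (err)
 *		goto out_free_mappings;
 */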

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from: The kernel start VA of the range
 * @to: The kernel end VA of the range (exclusive)
 * @phys_addr: The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
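
/*
 * Example (an illustrative sketch; vgic->vctrl_base and vctrl_phys are
 * assumed names, not definitions from this file): making a device's
 * ioremap()ed kernel mapping visible to Hyp mode as well, so the
 * hypervisor can access the device directly:
 *
 *	err = create_hyp_io_mappings(vgic->vctrl_base,
 *				     vgic->vctrl_base + PAGE_SIZE,
 *				     vctrl_phys);
 */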

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm: The KVM struct pointer for the VM.
 *
 * Allocates only the level-1 table, whose size is defined by S2_PGD_ORDER
 * (it can support either full 40-bit input addresses or be limited to
 * 32-bit input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}
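
/*
 * Sizing note (an editorial sketch of the arithmetic, assuming a 40-bit
 * IPA space and the LPAE stage-2 format): each level-1 descriptor is 8
 * bytes and maps a 1GB block, so the level-1 table needs
 * 2^(40 - 30) = 1024 entries, i.e. 8KB. That is more than one page, which
 * is why the table is allocated with __get_free_pages(..., S2_PGD_ORDER)
 * rather than a single __get_free_page().
 */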

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm: The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}
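
/*
 * Note (editorial summary, not in the original): stage2_set_pte() mirrors
 * the Hyp-side walkers above, but allocates intermediate tables from the
 * pre-filled memory cache because it runs under the mmu_lock spinlock,
 * where sleeping allocations are forbidden. A NULL cache means "called
 * from kvm_set_spte_hva": a missing intermediate table then implies
 * nothing is mapped at that IPA, so there is nothing to update.
 */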

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm: The KVM pointer
 * @guest_ipa: The IPA at which to insert the mapping
 * @pa: The physical address of the device
 * @size: The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
		kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}
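
/*
 * Usage sketch (illustrative; the identifiers are assumptions standing in
 * for whatever the caller uses): mapping a page of an interrupt
 * controller's CPU interface at a fixed guest IPA during VM setup:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, vgic_cpu_base_ipa,
 *				    vcpu_if_phys, PAGE_SIZE);
 *
 * Each page gets a writable PAGE_S2_DEVICE mapping; the iomap flag makes
 * an already-present PTE fail with -EFAULT instead of being silently
 * overwritten.
 */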

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	new_pte = pfn_pte(pfn, PAGE_S2);
	coherent_icache_guest_page(vcpu->kvm, gfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu: the VCPU pointer
 * @run: the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean one of two
 * things: either the guest simply needs more memory and we must allocate an
 * appropriate page, or the guest tried to access I/O memory, which is
 * emulated by user space. The distinction is based on the IPA causing the
 * fault and whether this memory region has been registered as standard RAM
 * by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check that the stage-2 fault is a translation or permission fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFSC=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
			      (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}
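
/*
 * Worked example (editorial, with assumed numbers): for a memslot with
 * userspace_addr 0x40000000 and base_gfn 0x80000, an hva of 0x40003000
 * intersects the slot, and hva_to_gfn_memslot() yields
 * gfn = 0x80000 + ((0x40003000 - 0x40000000) >> PAGE_SHIFT) = 0x80003,
 * so the handler runs on gpa 0x80003000.
 */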

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);

	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}