/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

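/*
 * Per-VCPU cache of pre-allocated page-table pages.
 *
 * mmu_topup_memory_cache() fills @cache with up to @max free pages (at
 * least @min), so that later table allocations can be done while holding
 * a spinlock; mmu_free_memory_cache() releases whatever is left, and
 * mmu_memory_cache_alloc() hands out one pre-allocated page.
 */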
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

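/*
 * A page-table page is tracked via the refcount of its struct page: each
 * entry installed in it takes a reference. When only the original
 * allocation reference is left, the table no longer contains any valid
 * entries and can be freed.
 */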
static bool page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

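/*
 * The clear_*_entry() helpers remove one entry at the given table level,
 * flush the TLB for the affected IPA and drop the reference that the
 * entry held on its table page. Clearing a table entry also frees the
 * next-level table it pointed to.
 */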
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pmd_t *pmd_table = pmd_offset(pud, 0);
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pmd_free(NULL, pmd_table);
	}
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	if (kvm_pmd_huge(*pmd)) {
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pte_t *pte_table = pte_offset_kernel(pmd, 0);
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pte_free_kernel(NULL, pte_table);
	}
	put_page(virt_to_page(pmd));
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	}
}

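/*
 * unmap_range() walks the page tables under @pgdp and removes all
 * mappings in [@start, @start + @size), releasing intermediate tables
 * once they become empty. It is used both for stage-2 tables (with a
 * valid @kvm) and for the HYP tables (@kvm == NULL, no TLB maintenance).
 */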
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = pud_addr_end(addr, end);
			continue;
		}

		if (pud_huge(*pud)) {
			/*
			 * If we are dealing with a huge pud, just clear it and
			 * move on.
			 */
			clear_pud_entry(kvm, pud, addr);
			addr = pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = pmd_addr_end(addr, end);
			continue;
		}

		if (!kvm_pmd_huge(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			clear_pte_entry(kvm, pte, addr);
			next = addr + PAGE_SIZE;
		}

		/*
		 * If the pmd entry is to be cleared, walk back up the ladder
		 */
		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

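/*
 * The create_hyp_*_mappings() helpers below populate a HYP page table
 * level by level: PTEs are written directly, while missing intermediate
 * tables are allocated on demand. Each new entry takes a reference on
 * its table page and is cleaned to the PoC so that the update is
 * visible to HYP mode, which runs with caches off during init.
 */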
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	unsigned long phys_addr = virt_to_phys(from);
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel memory mapping */
	if (!virt_addr_valid(from) || !virt_addr_valid(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP);
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

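/*
 * Walk the stage-2 page tables down to the PMD level for @addr,
 * allocating a missing PMD table from @cache if needed. Returns NULL
 * when the table is missing and no cache was supplied.
 */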
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}

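/*
 * Install a huge (PMD-sized) stage-2 mapping for @addr. An existing
 * mapping for the same pfn may be rewritten (e.g. to change
 * permissions), in which case only a TLB flush is needed; otherwise a
 * reference is taken on the PMD table page.
 */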
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault.  If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should be split
	 * first, unmapped, merged, and mapped back in on-demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));
	return 0;
}

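/*
 * Install a single stage-2 PTE for @addr, allocating the PTE table from
 * @cache if it doesn't exist yet. @iomap mappings refuse to overwrite an
 * existing entry; calls without a cache (from the MMU notifiers) simply
 * ignore unmapped ranges.
 */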
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create stage-2 page table mapping - Level 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

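/*
 * If the faulting pfn belongs to a transparent huge page, adjust *pfnp
 * and *ipap so that the fault can be satisfied with a single PMD-sized
 * stage-2 block mapping, moving the page reference from the tail page
 * to the head page. Returns true if a huge mapping can be used.
 */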
static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;

	if (PageTransCompound(pfn_to_page(pfn))) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}

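/*
 * user_mem_abort() handles a stage-2 fault on an address backed by a
 * memslot: it resolves the host page (hugetlbfs, THP or regular),
 * checks against concurrent MMU notifier invalidations via
 * mmu_notifier_seq, and installs the corresponding stage-2 PMD or PTE
 * mapping.
 */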
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to VMAs not aligned to the PMD mapping
		 * granularity cannot be mapped using block descriptors even
		 * if the pages belong to a THP for the process, because the
		 * stage-2 block descriptor will cover more than a single THP
		 * and we lose atomicity for unmapping, updates, and splits
		 * of the THP or other pages in the stage-2 block range.
		 */
		if (vma->vm_start & ~PMD_MASK)
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;
	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean one of two
 * things: either the guest simply needs more memory and we must allocate an
 * appropriate page, or the guest tried to access I/O memory, which is
 * emulated by user space. The distinction is based on the IPA causing the
 * fault and whether this memory region has been registered as standard RAM
 * by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

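/*
 * Apply @handler to every guest physical page whose userspace mapping
 * intersects the HVA range [@start, @end), by walking all memslots.
 * Used to implement the MMU notifier callbacks below.
 */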
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

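/**
 * kvm_mmu_init - initialise the HYP page tables and the idmap
 *
 * Sets up the identity mapping of the HYP init code (using a bounce
 * page if the code crosses a page boundary), and maps the trampoline
 * page into both the boot and the runtime HYP page tables.
 */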
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);

	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}