/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
#include <asm/mach/map.h>
#include <trace/events/kvm.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

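/*
 * kvm_tlb_flush_vmid - invalidate the stage-2 TLB entries for this VM
 *
 * Calls into Hyp mode, where the invalidation is performed for the VMID
 * assigned to @kvm.
 */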
static void kvm_tlb_flush_vmid(struct kvm *kvm)
{
        kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
        pte_val(*pte) = new_pte;
        /*
         * flush_pmd_entry just takes a void pointer and cleans the necessary
         * cache entries, so we can reuse the function for ptes.
         */
        flush_pmd_entry(pte);
}

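/*
 * The mmu_memory_cache helpers pre-allocate pages for stage-2 page table
 * levels, so that table entries can be installed later without having to
 * allocate (and potentially sleep) while holding kvm->mmu_lock.
 */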
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

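/*
 * free_ptes - free the level-3 (pte) tables hanging off a Hyp-mode pmd
 * @pmd:  pointer to the first entry of the level-2 table
 * @addr: virtual start address covered by the level-2 table
 */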
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
                if (!pmd_none(*pmd) && pmd_table(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr);
                        pte_free_kernel(NULL, pte);
                }
                pmd++;
        }
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
                pgd = hyp_pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none(*pud))
                        continue;
                BUG_ON(pud_bad(*pud));

                pmd = pmd_offset(pud, addr);
                free_ptes(pmd, addr);
                pmd_free(NULL, pmd);
                pud_clear(pud);
        }
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

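/*
 * create_hyp_pte_mappings - populate level-3 entries for a Hyp kernel mapping
 *
 * Maps the kernel virtual address range [start, end) into Hyp mode at the
 * same virtual addresses, using PAGE_HYP attributes.
 */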
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end)
{
        pte_t *pte;
        unsigned long addr;
        struct page *page;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                pte = pte_offset_kernel(pmd, addr);
                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
                kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
        }
}

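/*
 * create_hyp_io_pte_mappings - populate level-3 entries for a Hyp I/O mapping
 *
 * Maps the virtual range [start, end) to the physical frames starting at
 * *pfn_base, using device attributes.  *pfn_base is advanced as pages are
 * mapped so the caller can continue across pmd boundaries.
 */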
static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
                                       unsigned long end,
                                       unsigned long *pfn_base)
{
        pte_t *pte;
        unsigned long addr;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                pte = pte_offset_kernel(pmd, addr);
                BUG_ON(pfn_valid(*pfn_base));
                kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
                (*pfn_base)++;
        }
}

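/*
 * create_hyp_pmd_mappings - walk the level-2 table for a Hyp mapping
 *
 * Allocates level-3 tables as needed and hands each pmd-sized chunk to
 * either the kernel-page or the I/O mapping helper above.
 */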
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long *pfn_base)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                }

                next = pmd_addr_end(addr, end);

                /*
                 * If pfn_base is NULL, we map kernel pages into HYP with the
                 * virtual address. Otherwise, this is considered an I/O
                 * mapping and we map the physical region starting at
                 * *pfn_base to [start, end).
                 */
                if (!pfn_base)
                        create_hyp_pte_mappings(pmd, addr, next);
                else
                        create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
        }

        return 0;
}

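/*
 * __create_hyp_mappings - common worker for the Hyp mapping functions
 *
 * Walks the shared hyp_pgd under kvm_hyp_pgd_mutex, allocating level-2
 * tables as needed, and hands each pgd-sized chunk to
 * create_hyp_pmd_mappings().
 */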
static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        BUG_ON(start > end);
        if (start < PAGE_OFFSET)
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = start; addr < end; addr = next) {
                pgd = hyp_pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The range is mapped into Hyp mode at the same virtual addresses as in the
 * kernel, and to the same underlying physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
        return __create_hyp_mappings(from, to, NULL);
}

/**
 * create_hyp_io_mappings - map a physical IO range in Hyp mode
 * @from:	The virtual HYP start address of the range
 * @to:		The virtual HYP end address of the range (exclusive)
 * @addr:	The physical start address which gets mapped
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
        unsigned long pfn = __phys_to_pfn(addr);
        return __create_hyp_mappings(from, to, &pfn);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates only the 1st-level table, whose size is defined by S2_PGD_ORDER
 * (it can support either full 40-bit input addresses or be limited to 32-bit
 * input addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm->arch.pgd = pgd;

        return 0;
}

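/*
 * Helpers for unmap_stage2_range(): the refcount of each stage-2 table page
 * tracks the number of entries it holds (stage2_set_pte() takes a reference
 * per new entry), so a page_count() of 1 means the table is empty.  The
 * clear_*_entry() helpers remove an entry, free any lower-level table it
 * pointed to, and drop the corresponding reference.
 */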
static void clear_pud_entry(pud_t *pud)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_addr_t addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = kvm->arch.pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(pte);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(pmd);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(pud);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1
 * table and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

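/*
 * stage2_set_pte - install a stage-2 translation for a single page
 *
 * Walks kvm->arch.pgd for @addr, allocating intermediate tables from @cache
 * as needed, and installs *new_pte at level 3.  Each new entry raises the
 * refcount of the table page holding it.  With @iomap set, an already
 * present mapping is treated as an error.  A NULL @cache means the caller
 * cannot allocate (kvm_set_spte_hva), so missing tables are simply skipped.
 */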
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                pmd += pmd_index(addr);
                get_page(virt_to_page(pud));
        } else
                pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                clean_pte_table(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                pte += pte_index(addr);
                get_page(virt_to_page(pmd));
        } else
                pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid(kvm);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        return -EINVAL;
}

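/*
 * handle_hva_to_gpa - iterate over guest pages backed by a host VA range
 *
 * Walks all memslots that intersect [start, end) and invokes @handler for
 * the guest physical address of every page in the intersection.
 */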
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
        kvm_tlb_flush_vmid(kvm);
}

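/*
 * MMU notifier callbacks: when the host unmaps (part of) the userspace
 * region backing guest memory, tear down the corresponding stage-2
 * mappings so the guest cannot keep using stale translations.
 */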
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

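/*
 * kvm_mmu_get_httbr - return the physical address of the Hyp-mode page
 * table (hyp_pgd), i.e. the value to program into HTTBR.
 */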
phys_addr_t kvm_mmu_get_httbr(void)
{
        VM_BUG_ON(!virt_addr_valid(hyp_pgd));
        return virt_to_phys(hyp_pgd);
}

int kvm_mmu_init(void)
{
        if (!hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                return -ENOMEM;
        }

        return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
        unsigned long addr, end;
        unsigned long next;
        pgd_t *pgd = hyp_pgd;
        pud_t *pud;
        pmd_t *pmd;

        addr = virt_to_phys(__hyp_idmap_text_start);
        end = virt_to_phys(__hyp_idmap_text_end);

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);

                pud_clear(pud);
                clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
}