/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 *
 * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
 * macros (the entire kernel runs at EL2).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
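/*
 * For example, with VA_BITS == 39 the mask keeps bits [38:0], so the
 * kernel VA 0xffffffc000000000 (PAGE_OFFSET) becomes the HYP VA
 * 0x0000004000000000: a HYP alias is just the kernel address with its
 * top bits cleared.
 */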

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels in addition to the PGD and potentially the PUD which are
 * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
 * tables use one level fewer than the kernel).
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define KVM_MMU_CACHE_MIN_PAGES	1
#else
#define KVM_MMU_CACHE_MIN_PAGES	2
#endif

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
alternative_else
	nop
alternative_endif
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

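/*
 * C-side counterpart of the kern_hyp_va assembly macro above: rebase a
 * kernel VA from the kernel VA range onto the HYP VA range.
 */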
#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

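/*
 * Stage-2 permission helpers: flip an existing entry between read/write
 * and read-only (the read-only side is used, for instance, when
 * write-protecting pages for dirty logging).
 */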
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
}


#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, a single
 * pgd entry can address the entire IPA input range. Note that in this
 * case, the pgd is actually not used by the MMU for Stage-2 translations,
 * but is merely a fake pgd used as a data structure for the kernel
 * pgtable macros to work.
 */
#if PGDIR_SHIFT > KVM_PHYS_SHIFT
#define PTRS_PER_S2_PGD_SHIFT	0
#else
#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
#endif
#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)

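/* Index of the entry covering a given IPA in the (possibly fake) first-level pgd. */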
#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))

/*
 * If we are concatenating first level stage-2 page tables, we would have
 * at most 16 pointers in the fake PGD, because that's what the
 * architecture allows. In this case, (4 - CONFIG_PGTABLE_LEVELS)
 * represents the first level for the host, and we add 1 to go to the next
 * level (which uses concatenation) for the stage-2 tables.
 */
#if PTRS_PER_S2_PGD <= 16
#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
#else
#define KVM_PREALLOC_LEVEL	(0)
#endif
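/*
 * Worked examples (with the 40-bit KVM_PHYS_SHIFT defined above):
 * - 4K pages, 48-bit VA, CONFIG_PGTABLE_LEVELS == 4: PGDIR_SHIFT == 39,
 *   so PTRS_PER_S2_PGD == 2 and KVM_PREALLOC_LEVEL == 1 (two concatenated
 *   first-level tables at the host's PUD level).
 * - 64K pages, 48-bit VA, CONFIG_PGTABLE_LEVELS == 3: PGDIR_SHIFT == 42,
 *   so PTRS_PER_S2_PGD == 1 and KVM_PREALLOC_LEVEL == 2 (the hardware
 *   table is the host's PMD-level table).
 * - 4K pages, 39-bit VA, CONFIG_PGTABLE_LEVELS == 3: PGDIR_SHIFT == 30,
 *   so PTRS_PER_S2_PGD == 1024 > 16 and KVM_PREALLOC_LEVEL == 0.
 */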
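/*
 * Return the table that the hardware actually walks for Stage-2: the pgd
 * itself when nothing is pre-allocated, otherwise the pre-allocated PUD-
 * or PMD-level table hanging off the fake pgd.
 */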
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	pgd_t *pgd = kvm->arch.pgd;
	pud_t *pud;

	if (KVM_PREALLOC_LEVEL == 0)
		return pgd;

	pud = pud_offset(pgd, 0);
	if (KVM_PREALLOC_LEVEL == 1)
		return pud;

	BUG_ON(KVM_PREALLOC_LEVEL != 2);
	return pmd_offset(pud, 0);
}

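/*
 * Size of the hardware-walked table: PTRS_PER_S2_PGD concatenated
 * page-sized tables when a lower level is pre-allocated, otherwise just
 * PTRS_PER_S2_PGD pgd entries.
 */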
static inline unsigned int kvm_get_hwpgd_size(void)
{
	if (KVM_PREALLOC_LEVEL > 0)
		return PTRS_PER_S2_PGD * PAGE_SIZE;
	return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

/*
 * Allocate fake pgd for the host kernel page table macros to work.
 * This is not used by the hardware and we have no alignment
 * requirement for this allocation.
 */
static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
{
	int i;
	pgd_t *pgd;

	if (!KVM_PREALLOC_LEVEL)
		return hwpgd;

	/*
	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
	 * the PMD and the kernel will use folded pud.
	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
	 * pages.
	 */

	pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
			GFP_KERNEL | __GFP_ZERO);
	if (!pgd)
		return ERR_PTR(-ENOMEM);

	/* Plug the HW PGD into the fake one. */
	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
		if (KVM_PREALLOC_LEVEL == 1)
			pgd_populate(NULL, pgd + i,
				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
		else if (KVM_PREALLOC_LEVEL == 2)
			pud_populate(NULL, pud_offset(pgd, 0) + i,
				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
	}

	return pgd;
}

static inline void kvm_free_fake_pgd(pgd_t *pgd)
{
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(pgd);
}
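/*
 * The Stage-2 mapping code takes a reference on a page-table page for each
 * entry it installs (and drops it on unmap), so a page count of 1, the
 * initial allocation reference, means the table no longer holds any entries.
 */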
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define kvm_pmd_table_empty(kvm, pmdp) (0)
#else
#define kvm_pmd_table_empty(kvm, pmdp) \
	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define kvm_pud_table_empty(kvm, pudp) (0)
#else
#define kvm_pud_table_empty(kvm, pudp) \
	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif


struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

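/*
 * The guest's caches count as enabled when both SCTLR_EL1.M (bit 0,
 * stage-1 MMU enable) and SCTLR_EL1.C (bit 2, data cache enable) are set,
 * hence the 0b101 mask below.
 */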
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

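/*
 * Make a guest page coherent before mapping it at Stage-2: clean the data
 * cache to the point of coherency if the guest runs with its caches off
 * (or the IPA is mapped uncached), then bring the instruction cache in
 * sync. A ranged flush is enough for PIPT, VIPT needs a full I-cache
 * invalidate, and ASID-tagged VIVT caches are left alone here.
 */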
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

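/*
 * Flush the data cache to the point of coherency for the whole block of
 * memory mapped by a Stage-2 entry at the given level (page, PMD block,
 * PUD block).
 */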
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

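/*
 * An "extended idmap" is used when the idmap'ed HYP init code sits at a
 * physical address that the configured VA_BITS cannot cover, in which case
 * an extra translation level is stacked on top (see __kvm_extend_hypmap
 * below).
 */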
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}

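/*
 * 16-bit VMIDs are available when ID_AA64MMFR1_EL1.VMIDBits reads 2; for
 * any other value we fall back to the architectural minimum of 8 bits.
 */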
static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */