Christoffer Dall749cf76c2013-01-20 18:28:06 -05001/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
Christoffer Dall342cd0a2013-01-20 18:28:06 -050018
19#include <linux/mman.h>
20#include <linux/kvm_host.h>
21#include <linux/io.h>
Christoffer Dallad361f02012-11-01 17:14:45 +010022#include <linux/hugetlb.h>
Christoffer Dall45e96ea2013-01-20 18:43:58 -050023#include <trace/events/kvm.h>
Christoffer Dall342cd0a2013-01-20 18:28:06 -050024#include <asm/pgalloc.h>
Christoffer Dall94f8e642013-01-20 18:28:12 -050025#include <asm/cacheflush.h>
Christoffer Dall342cd0a2013-01-20 18:28:06 -050026#include <asm/kvm_arm.h>
27#include <asm/kvm_mmu.h>
Christoffer Dall45e96ea2013-01-20 18:43:58 -050028#include <asm/kvm_mmio.h>
Christoffer Dalld5d81842013-01-20 18:28:07 -050029#include <asm/kvm_asm.h>
Christoffer Dall94f8e642013-01-20 18:28:12 -050030#include <asm/kvm_emulate.h>
Marc Zyngier1e947ba2015-01-29 11:59:54 +000031#include <asm/virt.h>
Christoffer Dalld5d81842013-01-20 18:28:07 -050032
33#include "trace.h"
Christoffer Dall342cd0a2013-01-20 18:28:06 -050034
Marc Zyngier5a677ce2013-04-12 19:12:06 +010035static pgd_t *boot_hyp_pgd;
Marc Zyngier2fb41052013-04-12 19:12:03 +010036static pgd_t *hyp_pgd;
Ard Biesheuvele4c5a682015-03-19 16:42:28 +000037static pgd_t *merged_hyp_pgd;
Christoffer Dall342cd0a2013-01-20 18:28:06 -050038static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
39
Marc Zyngier5a677ce2013-04-12 19:12:06 +010040static unsigned long hyp_idmap_start;
41static unsigned long hyp_idmap_end;
42static phys_addr_t hyp_idmap_vector;
43
Suzuki K Poulose9163ee232016-03-22 17:01:21 +000044#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
Christoffer Dall38f791a2014-10-10 12:14:28 +020045#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
Mark Salter5d4e08c2014-03-28 14:25:19 +000046
Mario Smarduch15a49a42015-01-15 15:58:58 -080047#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
48#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
49
50static bool memslot_is_logging(struct kvm_memory_slot *memslot)
51{
Mario Smarduch15a49a42015-01-15 15:58:58 -080052 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
Mario Smarduch72760302015-01-15 15:59:01 -080053}
54
55/**
56 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
57 * @kvm: pointer to kvm structure.
58 *
59 * Interface to HYP function to flush all VM TLB entries
60 */
61void kvm_flush_remote_tlbs(struct kvm *kvm)
62{
63 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
Mario Smarduch15a49a42015-01-15 15:58:58 -080064}
Christoffer Dallad361f02012-11-01 17:14:45 +010065
Marc Zyngier48762762013-01-28 15:27:00 +000066static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
Christoffer Dalld5d81842013-01-20 18:28:07 -050067{
Suzuki K Poulose8684e702016-03-22 17:14:25 +000068 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
Christoffer Dalld5d81842013-01-20 18:28:07 -050069}
70
Marc Zyngier363ef892014-12-19 16:48:06 +000071/*
72 * D-Cache management functions. They take the page table entries by
73 * value, as they are flushing the cache using the kernel mapping (or
74 * kmap on 32bit).
75 */
76static void kvm_flush_dcache_pte(pte_t pte)
77{
78 __kvm_flush_dcache_pte(pte);
79}
80
81static void kvm_flush_dcache_pmd(pmd_t pmd)
82{
83 __kvm_flush_dcache_pmd(pmd);
84}
85
86static void kvm_flush_dcache_pud(pud_t pud)
87{
88 __kvm_flush_dcache_pud(pud);
89}
90
Ard Biesheuvele6fab542015-11-10 15:11:20 +010091static bool kvm_is_device_pfn(unsigned long pfn)
92{
93 return !pfn_valid(pfn);
94}
95
Mario Smarduch15a49a42015-01-15 15:58:58 -080096/**
97 * stage2_dissolve_pmd() - clear and flush huge PMD entry
98 * @kvm: pointer to kvm structure.
99 * @addr: IPA
100 * @pmd: pmd pointer for IPA
101 *
102 * Clears a PMD entry and flushes the 1st and 2nd stage TLB entries for @addr.
103 * Marks all pages in the range dirty.
104 */
105static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
106{
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000107 if (!pmd_thp_or_huge(*pmd))
Mario Smarduch15a49a42015-01-15 15:58:58 -0800108 return;
109
110 pmd_clear(pmd);
111 kvm_tlb_flush_vmid_ipa(kvm, addr);
112 put_page(virt_to_page(pmd));
113}
114
Christoffer Dalld5d81842013-01-20 18:28:07 -0500115static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
116 int min, int max)
117{
118 void *page;
119
120 BUG_ON(max > KVM_NR_MEM_OBJS);
121 if (cache->nobjs >= min)
122 return 0;
123 while (cache->nobjs < max) {
124 page = (void *)__get_free_page(PGALLOC_GFP);
125 if (!page)
126 return -ENOMEM;
127 cache->objects[cache->nobjs++] = page;
128 }
129 return 0;
130}
131
132static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
133{
134 while (mc->nobjs)
135 free_page((unsigned long)mc->objects[--mc->nobjs]);
136}
137
138static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
139{
140 void *p;
141
142 BUG_ON(!mc || !mc->nobjs);
143 p = mc->objects[--mc->nobjs];
144 return p;
145}
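
/*
 * Typical usage pattern (illustrative sketch only): the cache is topped
 * up outside the mmu_lock, because page allocation may sleep, and is
 * then consumed while the lock is held, where sleeping is not allowed.
 * Callers such as user_mem_abort() and kvm_phys_addr_ioremap() below
 * follow this shape:
 *
 *	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 *
 *	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
 *				     KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&kvm->mmu_lock);
 *	ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
 *	spin_unlock(&kvm->mmu_lock);
 */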
146
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000147static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
Marc Zyngier979acd52013-08-06 13:05:48 +0100148{
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000149 pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
150 stage2_pgd_clear(pgd);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200151 kvm_tlb_flush_vmid_ipa(kvm, addr);
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000152 stage2_pud_free(pud_table);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200153 put_page(virt_to_page(pgd));
Marc Zyngier979acd52013-08-06 13:05:48 +0100154}
155
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000156static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500157{
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000158 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
159 VM_BUG_ON(stage2_pud_huge(*pud));
160 stage2_pud_clear(pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200161 kvm_tlb_flush_vmid_ipa(kvm, addr);
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000162 stage2_pmd_free(pmd_table);
Marc Zyngier4f728272013-04-12 19:12:05 +0100163 put_page(virt_to_page(pud));
164}
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500165
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000166static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
Marc Zyngier4f728272013-04-12 19:12:05 +0100167{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200168 pte_t *pte_table = pte_offset_kernel(pmd, 0);
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000169 VM_BUG_ON(pmd_thp_or_huge(*pmd));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200170 pmd_clear(pmd);
171 kvm_tlb_flush_vmid_ipa(kvm, addr);
172 pte_free_kernel(NULL, pte_table);
Marc Zyngier4f728272013-04-12 19:12:05 +0100173 put_page(virt_to_page(pmd));
174}
175
Marc Zyngier363ef892014-12-19 16:48:06 +0000176/*
177 * Unmapping vs dcache management:
178 *
179 * If a guest maps certain memory pages as uncached, all writes will
180 * bypass the data cache and go directly to RAM. However, the CPUs
181 * can still speculate reads (not writes) and fill cache lines with
182 * data.
183 *
184 * Those cache lines will be *clean* cache lines though, so a
185 * clean+invalidate operation is equivalent to an invalidate
186 * operation, because no cache lines are marked dirty.
187 *
188 * Those clean cache lines could be filled prior to an uncached write
189 * by the guest, and the cache coherent IO subsystem would therefore
190 * end up writing old data to disk.
191 *
192 * This is why right after unmapping a page/section and invalidating
193 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
194 * the IO subsystem will never hit in the cache.
195 */
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000196static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200197 phys_addr_t addr, phys_addr_t end)
Marc Zyngier4f728272013-04-12 19:12:05 +0100198{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200199 phys_addr_t start_addr = addr;
200 pte_t *pte, *start_pte;
201
202 start_pte = pte = pte_offset_kernel(pmd, addr);
203 do {
204 if (!pte_none(*pte)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000205 pte_t old_pte = *pte;
206
Christoffer Dall4f853a72014-05-09 23:31:31 +0200207 kvm_set_pte(pte, __pte(0));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200208 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000209
210 /* No need to invalidate the cache for device mappings */
Ard Biesheuvel0de58f82015-12-03 09:25:22 +0100211 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
Marc Zyngier363ef892014-12-19 16:48:06 +0000212 kvm_flush_dcache_pte(old_pte);
213
214 put_page(virt_to_page(pte));
Christoffer Dall4f853a72014-05-09 23:31:31 +0200215 }
216 } while (pte++, addr += PAGE_SIZE, addr != end);
217
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000218 if (stage2_pte_table_empty(start_pte))
219 clear_stage2_pmd_entry(kvm, pmd, start_addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500220}
221
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000222static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200223 phys_addr_t addr, phys_addr_t end)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500224{
Christoffer Dall4f853a72014-05-09 23:31:31 +0200225 phys_addr_t next, start_addr = addr;
226 pmd_t *pmd, *start_pmd;
Marc Zyngier000d3992013-03-05 02:43:17 +0000227
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000228 start_pmd = pmd = stage2_pmd_offset(pud, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200229 do {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000230 next = stage2_pmd_addr_end(addr, end);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200231 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000232 if (pmd_thp_or_huge(*pmd)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000233 pmd_t old_pmd = *pmd;
234
Christoffer Dall4f853a72014-05-09 23:31:31 +0200235 pmd_clear(pmd);
236 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000237
238 kvm_flush_dcache_pmd(old_pmd);
239
Christoffer Dall4f853a72014-05-09 23:31:31 +0200240 put_page(virt_to_page(pmd));
241 } else {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000242 unmap_stage2_ptes(kvm, pmd, addr, next);
Marc Zyngier4f728272013-04-12 19:12:05 +0100243 }
244 }
Christoffer Dall4f853a72014-05-09 23:31:31 +0200245 } while (pmd++, addr = next, addr != end);
Marc Zyngier4f728272013-04-12 19:12:05 +0100246
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000247 if (stage2_pmd_table_empty(start_pmd))
248 clear_stage2_pud_entry(kvm, pud, start_addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200249}
250
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000251static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
Christoffer Dall4f853a72014-05-09 23:31:31 +0200252 phys_addr_t addr, phys_addr_t end)
253{
254 phys_addr_t next, start_addr = addr;
255 pud_t *pud, *start_pud;
256
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000257 start_pud = pud = stage2_pud_offset(pgd, addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200258 do {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000259 next = stage2_pud_addr_end(addr, end);
260 if (!stage2_pud_none(*pud)) {
261 if (stage2_pud_huge(*pud)) {
Marc Zyngier363ef892014-12-19 16:48:06 +0000262 pud_t old_pud = *pud;
263
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000264 stage2_pud_clear(pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200265 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngier363ef892014-12-19 16:48:06 +0000266 kvm_flush_dcache_pud(old_pud);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200267 put_page(virt_to_page(pud));
268 } else {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000269 unmap_stage2_pmds(kvm, pud, addr, next);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200270 }
271 }
272 } while (pud++, addr = next, addr != end);
273
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000274 if (stage2_pud_table_empty(start_pud))
275 clear_stage2_pgd_entry(kvm, pgd, start_addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200276}
277
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000278/**
279 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
280 * @kvm: The VM pointer
281 * @start: The intermediate physical base address of the range to unmap
282 * @size: The size of the area to unmap
283 *
284 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
285 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
286 * destroying the VM), otherwise another faulting VCPU may come in and mess
287 * with things behind our backs.
288 */
289static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
Christoffer Dall4f853a72014-05-09 23:31:31 +0200290{
291 pgd_t *pgd;
292 phys_addr_t addr = start, end = start + size;
293 phys_addr_t next;
294
Suzuki K Pouloseac303c62017-04-03 15:12:43 +0100295 assert_spin_locked(&kvm->mmu_lock);
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000296 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200297 do {
Suzuki K Poulose7a1c8312016-03-23 12:08:02 +0000298 next = stage2_pgd_addr_end(addr, end);
299 if (!stage2_pgd_none(*pgd))
300 unmap_stage2_puds(kvm, pgd, addr, next);
Suzuki K Pouloseac303c62017-04-03 15:12:43 +0100301 /*
302 * If the range is too large, release the kvm->mmu_lock
303 * to prevent starvation and lockup detector warnings.
304 */
305 if (next != end)
306 cond_resched_lock(&kvm->mmu_lock);
Christoffer Dall4f853a72014-05-09 23:31:31 +0200307 } while (pgd++, addr = next, addr != end);
Marc Zyngier000d3992013-03-05 02:43:17 +0000308}
309
Marc Zyngier9d218a12014-01-15 12:50:23 +0000310static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
311 phys_addr_t addr, phys_addr_t end)
312{
313 pte_t *pte;
314
315 pte = pte_offset_kernel(pmd, addr);
316 do {
Ard Biesheuvel0de58f82015-12-03 09:25:22 +0100317 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
Marc Zyngier363ef892014-12-19 16:48:06 +0000318 kvm_flush_dcache_pte(*pte);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000319 } while (pte++, addr += PAGE_SIZE, addr != end);
320}
321
322static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
323 phys_addr_t addr, phys_addr_t end)
324{
325 pmd_t *pmd;
326 phys_addr_t next;
327
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000328 pmd = stage2_pmd_offset(pud, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000329 do {
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000330 next = stage2_pmd_addr_end(addr, end);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000331 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +0000332 if (pmd_thp_or_huge(*pmd))
Marc Zyngier363ef892014-12-19 16:48:06 +0000333 kvm_flush_dcache_pmd(*pmd);
334 else
Marc Zyngier9d218a12014-01-15 12:50:23 +0000335 stage2_flush_ptes(kvm, pmd, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000336 }
337 } while (pmd++, addr = next, addr != end);
338}
339
340static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
341 phys_addr_t addr, phys_addr_t end)
342{
343 pud_t *pud;
344 phys_addr_t next;
345
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000346 pud = stage2_pud_offset(pgd, addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000347 do {
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000348 next = stage2_pud_addr_end(addr, end);
349 if (!stage2_pud_none(*pud)) {
350 if (stage2_pud_huge(*pud))
Marc Zyngier363ef892014-12-19 16:48:06 +0000351 kvm_flush_dcache_pud(*pud);
352 else
Marc Zyngier9d218a12014-01-15 12:50:23 +0000353 stage2_flush_pmds(kvm, pud, addr, next);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000354 }
355 } while (pud++, addr = next, addr != end);
356}
357
358static void stage2_flush_memslot(struct kvm *kvm,
359 struct kvm_memory_slot *memslot)
360{
361 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
362 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
363 phys_addr_t next;
364 pgd_t *pgd;
365
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000366 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000367 do {
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000368 next = stage2_pgd_addr_end(addr, end);
Marc Zyngier9d218a12014-01-15 12:50:23 +0000369 stage2_flush_puds(kvm, pgd, addr, next);
370 } while (pgd++, addr = next, addr != end);
371}
372
373/**
374 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
375 * @kvm: The struct kvm pointer
376 *
377 * Go through the stage 2 page tables and invalidate any cache lines
378 * backing memory already mapped to the VM.
379 */
Marc Zyngier3c1e7162014-12-19 16:05:31 +0000380static void stage2_flush_vm(struct kvm *kvm)
Marc Zyngier9d218a12014-01-15 12:50:23 +0000381{
382 struct kvm_memslots *slots;
383 struct kvm_memory_slot *memslot;
384 int idx;
385
386 idx = srcu_read_lock(&kvm->srcu);
387 spin_lock(&kvm->mmu_lock);
388
389 slots = kvm_memslots(kvm);
390 kvm_for_each_memslot(memslot, slots)
391 stage2_flush_memslot(kvm, memslot);
392
393 spin_unlock(&kvm->mmu_lock);
394 srcu_read_unlock(&kvm->srcu, idx);
395}
396
Suzuki K Poulose64f32492016-03-22 18:56:21 +0000397static void clear_hyp_pgd_entry(pgd_t *pgd)
398{
399 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
400 pgd_clear(pgd);
401 pud_free(NULL, pud_table);
402 put_page(virt_to_page(pgd));
403}
404
405static void clear_hyp_pud_entry(pud_t *pud)
406{
407 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
408 VM_BUG_ON(pud_huge(*pud));
409 pud_clear(pud);
410 pmd_free(NULL, pmd_table);
411 put_page(virt_to_page(pud));
412}
413
414static void clear_hyp_pmd_entry(pmd_t *pmd)
415{
416 pte_t *pte_table = pte_offset_kernel(pmd, 0);
417 VM_BUG_ON(pmd_thp_or_huge(*pmd));
418 pmd_clear(pmd);
419 pte_free_kernel(NULL, pte_table);
420 put_page(virt_to_page(pmd));
421}
422
423static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
424{
425 pte_t *pte, *start_pte;
426
427 start_pte = pte = pte_offset_kernel(pmd, addr);
428 do {
429 if (!pte_none(*pte)) {
430 kvm_set_pte(pte, __pte(0));
431 put_page(virt_to_page(pte));
432 }
433 } while (pte++, addr += PAGE_SIZE, addr != end);
434
435 if (hyp_pte_table_empty(start_pte))
436 clear_hyp_pmd_entry(pmd);
437}
438
439static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
440{
441 phys_addr_t next;
442 pmd_t *pmd, *start_pmd;
443
444 start_pmd = pmd = pmd_offset(pud, addr);
445 do {
446 next = pmd_addr_end(addr, end);
447 /* Hyp doesn't use huge pmds */
448 if (!pmd_none(*pmd))
449 unmap_hyp_ptes(pmd, addr, next);
450 } while (pmd++, addr = next, addr != end);
451
452 if (hyp_pmd_table_empty(start_pmd))
453 clear_hyp_pud_entry(pud);
454}
455
456static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
457{
458 phys_addr_t next;
459 pud_t *pud, *start_pud;
460
461 start_pud = pud = pud_offset(pgd, addr);
462 do {
463 next = pud_addr_end(addr, end);
464 /* Hyp doesn't use huge puds */
465 if (!pud_none(*pud))
466 unmap_hyp_pmds(pud, addr, next);
467 } while (pud++, addr = next, addr != end);
468
469 if (hyp_pud_table_empty(start_pud))
470 clear_hyp_pgd_entry(pgd);
471}
472
473static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
474{
475 pgd_t *pgd;
476 phys_addr_t addr = start, end = start + size;
477 phys_addr_t next;
478
479 /*
480 * We don't unmap anything from HYP, except at the hyp tear down.
481 * Hence, we don't have to invalidate the TLBs here.
482 */
483 pgd = pgdp + pgd_index(addr);
484 do {
485 next = pgd_addr_end(addr, end);
486 if (!pgd_none(*pgd))
487 unmap_hyp_puds(pgd, addr, next);
488 } while (pgd++, addr = next, addr != end);
489}
490
Marc Zyngier000d3992013-03-05 02:43:17 +0000491/**
Marc Zyngier4f728272013-04-12 19:12:05 +0100492 * free_hyp_pgds - free Hyp-mode page tables
Marc Zyngier000d3992013-03-05 02:43:17 +0000493 *
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100494 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
495 * therefore contains either mappings in the kernel memory area (above
496 * PAGE_OFFSET), or device mappings in the vmalloc range (from
497 * VMALLOC_START to VMALLOC_END).
498 *
499 * boot_hyp_pgd should only map two pages for the init code.
Marc Zyngier000d3992013-03-05 02:43:17 +0000500 */
Marc Zyngier4f728272013-04-12 19:12:05 +0100501void free_hyp_pgds(void)
Marc Zyngier000d3992013-03-05 02:43:17 +0000502{
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500503 unsigned long addr;
504
Marc Zyngierd157f4a2013-04-12 19:12:07 +0100505 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100506
Marc Zyngier26781f9c2016-06-30 18:40:46 +0100507 if (boot_hyp_pgd) {
508 unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
509 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
510 boot_hyp_pgd = NULL;
511 }
512
Marc Zyngier4f728272013-04-12 19:12:05 +0100513 if (hyp_pgd) {
Marc Zyngier26781f9c2016-06-30 18:40:46 +0100514 unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
Marc Zyngier4f728272013-04-12 19:12:05 +0100515 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
Marc Zyngier6c41a412016-06-30 18:40:51 +0100516 unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
Marc Zyngier4f728272013-04-12 19:12:05 +0100517 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
Marc Zyngier6c41a412016-06-30 18:40:51 +0100518 unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
Marc Zyngierd4cb9df52013-05-14 12:11:34 +0100519
Christoffer Dall38f791a2014-10-10 12:14:28 +0200520 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
Marc Zyngierd157f4a2013-04-12 19:12:07 +0100521 hyp_pgd = NULL;
Marc Zyngier4f728272013-04-12 19:12:05 +0100522 }
Ard Biesheuvele4c5a682015-03-19 16:42:28 +0000523 if (merged_hyp_pgd) {
524 clear_page(merged_hyp_pgd);
525 free_page((unsigned long)merged_hyp_pgd);
526 merged_hyp_pgd = NULL;
527 }
Marc Zyngier4f728272013-04-12 19:12:05 +0100528
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500529 mutex_unlock(&kvm_hyp_pgd_mutex);
530}
531
532static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
Marc Zyngier6060df82013-04-12 19:12:01 +0100533 unsigned long end, unsigned long pfn,
534 pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500535{
536 pte_t *pte;
537 unsigned long addr;
538
Marc Zyngier3562c762013-04-12 19:12:02 +0100539 addr = start;
540 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100541 pte = pte_offset_kernel(pmd, addr);
542 kvm_set_pte(pte, pfn_pte(pfn, prot));
Marc Zyngier4f728272013-04-12 19:12:05 +0100543 get_page(virt_to_page(pte));
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100544 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
Marc Zyngier6060df82013-04-12 19:12:01 +0100545 pfn++;
Marc Zyngier3562c762013-04-12 19:12:02 +0100546 } while (addr += PAGE_SIZE, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500547}
548
549static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
Marc Zyngier6060df82013-04-12 19:12:01 +0100550 unsigned long end, unsigned long pfn,
551 pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500552{
553 pmd_t *pmd;
554 pte_t *pte;
555 unsigned long addr, next;
556
Marc Zyngier3562c762013-04-12 19:12:02 +0100557 addr = start;
558 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100559 pmd = pmd_offset(pud, addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500560
561 BUG_ON(pmd_sect(*pmd));
562
563 if (pmd_none(*pmd)) {
Marc Zyngier6060df82013-04-12 19:12:01 +0100564 pte = pte_alloc_one_kernel(NULL, addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500565 if (!pte) {
566 kvm_err("Cannot allocate Hyp pte\n");
567 return -ENOMEM;
568 }
569 pmd_populate_kernel(NULL, pmd, pte);
Marc Zyngier4f728272013-04-12 19:12:05 +0100570 get_page(virt_to_page(pmd));
Marc Zyngier5a677ce2013-04-12 19:12:06 +0100571 kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500572 }
573
574 next = pmd_addr_end(addr, end);
575
Marc Zyngier6060df82013-04-12 19:12:01 +0100576 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
577 pfn += (next - addr) >> PAGE_SHIFT;
Marc Zyngier3562c762013-04-12 19:12:02 +0100578 } while (addr = next, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500579
580 return 0;
581}
582
Christoffer Dall38f791a2014-10-10 12:14:28 +0200583static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
584 unsigned long end, unsigned long pfn,
585 pgprot_t prot)
586{
587 pud_t *pud;
588 pmd_t *pmd;
589 unsigned long addr, next;
590 int ret;
591
592 addr = start;
593 do {
594 pud = pud_offset(pgd, addr);
595
596 if (pud_none_or_clear_bad(pud)) {
597 pmd = pmd_alloc_one(NULL, addr);
598 if (!pmd) {
599 kvm_err("Cannot allocate Hyp pmd\n");
600 return -ENOMEM;
601 }
602 pud_populate(NULL, pud, pmd);
603 get_page(virt_to_page(pud));
604 kvm_flush_dcache_to_poc(pud, sizeof(*pud));
605 }
606
607 next = pud_addr_end(addr, end);
608 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
609 if (ret)
610 return ret;
611 pfn += (next - addr) >> PAGE_SHIFT;
612 } while (addr = next, addr != end);
613
614 return 0;
615}
616
Marc Zyngier6060df82013-04-12 19:12:01 +0100617static int __create_hyp_mappings(pgd_t *pgdp,
618 unsigned long start, unsigned long end,
619 unsigned long pfn, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500620{
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500621 pgd_t *pgd;
622 pud_t *pud;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500623 unsigned long addr, next;
624 int err = 0;
625
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500626 mutex_lock(&kvm_hyp_pgd_mutex);
Marc Zyngier3562c762013-04-12 19:12:02 +0100627 addr = start & PAGE_MASK;
628 end = PAGE_ALIGN(end);
629 do {
Marc Zyngier6060df82013-04-12 19:12:01 +0100630 pgd = pgdp + pgd_index(addr);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500631
Christoffer Dall38f791a2014-10-10 12:14:28 +0200632 if (pgd_none(*pgd)) {
633 pud = pud_alloc_one(NULL, addr);
634 if (!pud) {
635 kvm_err("Cannot allocate Hyp pud\n");
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500636 err = -ENOMEM;
637 goto out;
638 }
Christoffer Dall38f791a2014-10-10 12:14:28 +0200639 pgd_populate(NULL, pgd, pud);
640 get_page(virt_to_page(pgd));
641 kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500642 }
643
644 next = pgd_addr_end(addr, end);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200645 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500646 if (err)
647 goto out;
Marc Zyngier6060df82013-04-12 19:12:01 +0100648 pfn += (next - addr) >> PAGE_SHIFT;
Marc Zyngier3562c762013-04-12 19:12:02 +0100649 } while (addr = next, addr != end);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500650out:
651 mutex_unlock(&kvm_hyp_pgd_mutex);
652 return err;
653}
654
Christoffer Dall40c27292013-11-15 13:14:12 -0800655static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
656{
657 if (!is_vmalloc_addr(kaddr)) {
658 BUG_ON(!virt_addr_valid(kaddr));
659 return __pa(kaddr);
660 } else {
661 return page_to_phys(vmalloc_to_page(kaddr)) +
662 offset_in_page(kaddr);
663 }
664}
665
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500666/**
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100667 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500668 * @from: The virtual kernel start address of the range
669 * @to: The virtual kernel end address of the range (exclusive)
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100670 * @prot: The protection to be applied to this range
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500671 *
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100672 * The HYP mapping uses the same virtual address as the kernel mapping
673 * (modulo HYP_PAGE_OFFSET) and points to the same underlying
674 * physical pages.
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500675 */
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100676int create_hyp_mappings(void *from, void *to, pgprot_t prot)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500677{
Christoffer Dall40c27292013-11-15 13:14:12 -0800678 phys_addr_t phys_addr;
679 unsigned long virt_addr;
Marc Zyngier6c41a412016-06-30 18:40:51 +0100680 unsigned long start = kern_hyp_va((unsigned long)from);
681 unsigned long end = kern_hyp_va((unsigned long)to);
Marc Zyngier6060df82013-04-12 19:12:01 +0100682
Marc Zyngier1e947ba2015-01-29 11:59:54 +0000683 if (is_kernel_in_hyp_mode())
684 return 0;
685
Christoffer Dall40c27292013-11-15 13:14:12 -0800686 start = start & PAGE_MASK;
687 end = PAGE_ALIGN(end);
Marc Zyngier6060df82013-04-12 19:12:01 +0100688
Christoffer Dall40c27292013-11-15 13:14:12 -0800689 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
690 int err;
691
692 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
693 err = __create_hyp_mappings(hyp_pgd, virt_addr,
694 virt_addr + PAGE_SIZE,
695 __phys_to_pfn(phys_addr),
Marc Zyngierc8dddec2016-06-13 15:00:45 +0100696 prot);
Christoffer Dall40c27292013-11-15 13:14:12 -0800697 if (err)
698 return err;
699 }
700
701 return 0;
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500702}
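
/*
 * Example (illustrative, modelled on init_hyp_mode() in arm.c; details
 * such as the kvm_ksym_ref() wrappers may differ between kernel
 * versions): mapping the kernel's rodata section into HYP read-only:
 *
 *	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
 *				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
 *	if (err)
 *		kvm_err("Cannot map rodata section\n");
 */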
703
704/**
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100705 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
706 * @from: The kernel start VA of the range
707 * @to: The kernel end VA of the range (exclusive)
Marc Zyngier6060df82013-04-12 19:12:01 +0100708 * @phys_addr: The physical start address which gets mapped
Marc Zyngier06e8c3b2012-10-28 01:09:14 +0100709 *
710 * The resulting HYP VA is the same as the kernel VA, modulo
711 * HYP_PAGE_OFFSET.
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500712 */
Marc Zyngier6060df82013-04-12 19:12:01 +0100713int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500714{
Marc Zyngier6c41a412016-06-30 18:40:51 +0100715 unsigned long start = kern_hyp_va((unsigned long)from);
716 unsigned long end = kern_hyp_va((unsigned long)to);
Marc Zyngier6060df82013-04-12 19:12:01 +0100717
Marc Zyngier1e947ba2015-01-29 11:59:54 +0000718 if (is_kernel_in_hyp_mode())
719 return 0;
720
Marc Zyngier6060df82013-04-12 19:12:01 +0100721 /* Check for a valid kernel IO mapping */
722 if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
723 return -EINVAL;
724
725 return __create_hyp_mappings(hyp_pgd, start, end,
726 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
Christoffer Dall342cd0a2013-01-20 18:28:06 -0500727}
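
/*
 * Example (illustrative, with placeholder names): a device range that the
 * kernel has already ioremap()ed, with virtual base vctrl_base, physical
 * base vctrl_phys and length SZ_8K, can be made visible to HYP at the
 * same VA modulo the HYP offset:
 *
 *	err = create_hyp_io_mappings(vctrl_base, vctrl_base + SZ_8K,
 *				     vctrl_phys);
 */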
728
Christoffer Dalld5d81842013-01-20 18:28:07 -0500729/**
730 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
731 * @kvm: The KVM struct pointer for the VM.
732 *
Vladimir Murzin9d4dc6882015-11-16 11:28:16 +0000733 * Allocates only the stage-2 HW PGD level table(s) (can support either full
734 * 40-bit input addresses or limited to 32-bit input addresses). Clears the
735 * allocated pages.
Christoffer Dalld5d81842013-01-20 18:28:07 -0500736 *
737 * Note we don't need locking here as this is only called when the VM is
738 * created, which can only be done once.
739 */
740int kvm_alloc_stage2_pgd(struct kvm *kvm)
741{
742 pgd_t *pgd;
743
744 if (kvm->arch.pgd != NULL) {
745 kvm_err("kvm_arch already initialized?\n");
746 return -EINVAL;
747 }
748
Suzuki K Poulose9163ee232016-03-22 17:01:21 +0000749 /* Allocate the HW PGD, making sure that each page gets its own refcount */
750 pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
751 if (!pgd)
Marc Zyngiera9873702015-03-10 19:06:59 +0000752 return -ENOMEM;
753
Christoffer Dalld5d81842013-01-20 18:28:07 -0500754 kvm->arch.pgd = pgd;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500755 return 0;
756}
757
Christoffer Dall957db102014-11-27 10:35:03 +0100758static void stage2_unmap_memslot(struct kvm *kvm,
759 struct kvm_memory_slot *memslot)
760{
761 hva_t hva = memslot->userspace_addr;
762 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
763 phys_addr_t size = PAGE_SIZE * memslot->npages;
764 hva_t reg_end = hva + size;
765
766 /*
767 * A memory region could potentially cover multiple VMAs, and any holes
768 * between them, so iterate over all of them to find out if we should
769 * unmap any of them.
770 *
771 * +--------------------------------------------+
772 * +---------------+----------------+ +----------------+
773 * | : VMA 1 | VMA 2 | | VMA 3 : |
774 * +---------------+----------------+ +----------------+
775 * | memory region |
776 * +--------------------------------------------+
777 */
778 do {
779 struct vm_area_struct *vma = find_vma(current->mm, hva);
780 hva_t vm_start, vm_end;
781
782 if (!vma || vma->vm_start >= reg_end)
783 break;
784
785 /*
786 * Take the intersection of this VMA with the memory region
787 */
788 vm_start = max(hva, vma->vm_start);
789 vm_end = min(reg_end, vma->vm_end);
790
791 if (!(vma->vm_flags & VM_PFNMAP)) {
792 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
793 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
794 }
795 hva = vm_end;
796 } while (hva < reg_end);
797}
798
799/**
800 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
801 * @kvm: The struct kvm pointer
802 *
803 * Go through the memory regions and unmap any regular RAM
804 * backing memory already mapped to the VM.
805 */
806void stage2_unmap_vm(struct kvm *kvm)
807{
808 struct kvm_memslots *slots;
809 struct kvm_memory_slot *memslot;
810 int idx;
811
812 idx = srcu_read_lock(&kvm->srcu);
Marc Zyngier48f28252017-03-16 18:20:49 +0000813 down_read(&current->mm->mmap_sem);
Christoffer Dall957db102014-11-27 10:35:03 +0100814 spin_lock(&kvm->mmu_lock);
815
816 slots = kvm_memslots(kvm);
817 kvm_for_each_memslot(memslot, slots)
818 stage2_unmap_memslot(kvm, memslot);
819
820 spin_unlock(&kvm->mmu_lock);
Marc Zyngier48f28252017-03-16 18:20:49 +0000821 up_read(&current->mm->mmap_sem);
Christoffer Dall957db102014-11-27 10:35:03 +0100822 srcu_read_unlock(&kvm->srcu, idx);
823}
824
Christoffer Dalld5d81842013-01-20 18:28:07 -0500825/**
826 * kvm_free_stage2_pgd - free all stage-2 tables
827 * @kvm: The KVM struct pointer for the VM.
828 *
829 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
830 * underlying level-2 and level-3 tables before freeing the actual level-1 table
831 * and setting the struct pointer to NULL.
Christoffer Dalld5d81842013-01-20 18:28:07 -0500832 */
833void kvm_free_stage2_pgd(struct kvm *kvm)
834{
Suzuki K Poulose3e033632017-05-03 15:17:51 +0100835 void *pgd = NULL;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500836
Suzuki K Pouloseac303c62017-04-03 15:12:43 +0100837 spin_lock(&kvm->mmu_lock);
Suzuki K Poulose3e033632017-05-03 15:17:51 +0100838 if (kvm->arch.pgd) {
839 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
Suzuki K Poulosedd2342a2017-05-16 10:34:54 +0100840 pgd = READ_ONCE(kvm->arch.pgd);
Suzuki K Poulose3e033632017-05-03 15:17:51 +0100841 kvm->arch.pgd = NULL;
842 }
Suzuki K Pouloseac303c62017-04-03 15:12:43 +0100843 spin_unlock(&kvm->mmu_lock);
844
Suzuki K Poulose9163ee232016-03-22 17:01:21 +0000845 /* Free the HW pgd, one page at a time */
Suzuki K Poulose3e033632017-05-03 15:17:51 +0100846 if (pgd)
847 free_pages_exact(pgd, S2_PGD_SIZE);
Christoffer Dalld5d81842013-01-20 18:28:07 -0500848}
849
Christoffer Dall38f791a2014-10-10 12:14:28 +0200850static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
851 phys_addr_t addr)
852{
853 pgd_t *pgd;
854 pud_t *pud;
855
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000856 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
857 if (WARN_ON(stage2_pgd_none(*pgd))) {
Christoffer Dall38f791a2014-10-10 12:14:28 +0200858 if (!cache)
859 return NULL;
860 pud = mmu_memory_cache_alloc(cache);
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000861 stage2_pgd_populate(pgd, pud);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200862 get_page(virt_to_page(pgd));
863 }
864
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000865 return stage2_pud_offset(pgd, addr);
Christoffer Dall38f791a2014-10-10 12:14:28 +0200866}
867
Christoffer Dallad361f02012-11-01 17:14:45 +0100868static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
869 phys_addr_t addr)
Christoffer Dalld5d81842013-01-20 18:28:07 -0500870{
Christoffer Dalld5d81842013-01-20 18:28:07 -0500871 pud_t *pud;
872 pmd_t *pmd;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500873
Christoffer Dall38f791a2014-10-10 12:14:28 +0200874 pud = stage2_get_pud(kvm, cache, addr);
Marc Zyngierf75e09e2017-06-05 19:17:18 +0100875 if (!pud)
876 return NULL;
877
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000878 if (stage2_pud_none(*pud)) {
Christoffer Dalld5d81842013-01-20 18:28:07 -0500879 if (!cache)
Christoffer Dallad361f02012-11-01 17:14:45 +0100880 return NULL;
Christoffer Dalld5d81842013-01-20 18:28:07 -0500881 pmd = mmu_memory_cache_alloc(cache);
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000882 stage2_pud_populate(pud, pmd);
Christoffer Dalld5d81842013-01-20 18:28:07 -0500883 get_page(virt_to_page(pud));
Marc Zyngierc62ee2b2012-10-15 11:27:37 +0100884 }
885
Suzuki K Poulose70fd1902016-03-22 18:33:45 +0000886 return stage2_pmd_offset(pud, addr);
Christoffer Dallad361f02012-11-01 17:14:45 +0100887}
Christoffer Dalld5d81842013-01-20 18:28:07 -0500888
Christoffer Dallad361f02012-11-01 17:14:45 +0100889static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
890 *cache, phys_addr_t addr, const pmd_t *new_pmd)
891{
892 pmd_t *pmd, old_pmd;
893
894 pmd = stage2_get_pmd(kvm, cache, addr);
895 VM_BUG_ON(!pmd);
896
897 /*
898 * Mapping in huge pages should only happen through a fault. If a
899 * page is merged into a transparent huge page, the individual
900 * subpages of that huge page should be unmapped through MMU
901 * notifiers before we get here.
902 *
903 * Merging of CompoundPages is not supported; they should be split
904 * first, unmapped, merged, and mapped back in on-demand.
905 */
906 VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
907
908 old_pmd = *pmd;
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100909 if (pmd_present(old_pmd)) {
910 pmd_clear(pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +0100911 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100912 } else {
Christoffer Dallad361f02012-11-01 17:14:45 +0100913 get_page(virt_to_page(pmd));
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100914 }
915
916 kvm_set_pmd(pmd, *new_pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +0100917 return 0;
918}
919
920static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
Mario Smarduch15a49a42015-01-15 15:58:58 -0800921 phys_addr_t addr, const pte_t *new_pte,
922 unsigned long flags)
Christoffer Dallad361f02012-11-01 17:14:45 +0100923{
924 pmd_t *pmd;
925 pte_t *pte, old_pte;
Mario Smarduch15a49a42015-01-15 15:58:58 -0800926 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
927 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
928
929 VM_BUG_ON(logging_active && !cache);
Christoffer Dallad361f02012-11-01 17:14:45 +0100930
Christoffer Dall38f791a2014-10-10 12:14:28 +0200931 /* Create stage-2 page table mapping - Levels 0 and 1 */
Christoffer Dallad361f02012-11-01 17:14:45 +0100932 pmd = stage2_get_pmd(kvm, cache, addr);
933 if (!pmd) {
934 /*
935 * Ignore calls from kvm_set_spte_hva for unallocated
936 * address ranges.
937 */
938 return 0;
939 }
940
Mario Smarduch15a49a42015-01-15 15:58:58 -0800941 /*
942 * While dirty page logging is active, dissolve a huge PMD, then
943 * continue on to allocate a regular page.
944 */
945 if (logging_active)
946 stage2_dissolve_pmd(kvm, addr, pmd);
947
Christoffer Dallad361f02012-11-01 17:14:45 +0100948 /* Create stage-2 page mappings - Level 2 */
Christoffer Dalld5d81842013-01-20 18:28:07 -0500949 if (pmd_none(*pmd)) {
950 if (!cache)
951 return 0; /* ignore calls from kvm_set_spte_hva */
952 pte = mmu_memory_cache_alloc(cache);
Christoffer Dalld5d81842013-01-20 18:28:07 -0500953 pmd_populate_kernel(NULL, pmd, pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -0500954 get_page(virt_to_page(pmd));
Marc Zyngierc62ee2b2012-10-15 11:27:37 +0100955 }
956
957 pte = pte_offset_kernel(pmd, addr);
Christoffer Dalld5d81842013-01-20 18:28:07 -0500958
959 if (iomap && pte_present(*pte))
960 return -EFAULT;
961
962 /* Create 2nd stage page table mapping - Level 3 */
963 old_pte = *pte;
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100964 if (pte_present(old_pte)) {
965 kvm_set_pte(pte, __pte(0));
Marc Zyngier48762762013-01-28 15:27:00 +0000966 kvm_tlb_flush_vmid_ipa(kvm, addr);
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100967 } else {
Christoffer Dalld5d81842013-01-20 18:28:07 -0500968 get_page(virt_to_page(pte));
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100969 }
Christoffer Dalld5d81842013-01-20 18:28:07 -0500970
Marc Zyngierd4b9e072016-04-28 16:16:31 +0100971 kvm_set_pte(pte, *new_pte);
Christoffer Dalld5d81842013-01-20 18:28:07 -0500972 return 0;
973}
974
Catalin Marinas06485052016-04-13 17:57:37 +0100975#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
976static int stage2_ptep_test_and_clear_young(pte_t *pte)
977{
978 if (pte_young(*pte)) {
979 *pte = pte_mkold(*pte);
980 return 1;
981 }
982 return 0;
983}
984#else
985static int stage2_ptep_test_and_clear_young(pte_t *pte)
986{
987 return __ptep_test_and_clear_young(pte);
988}
989#endif
990
991static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
992{
993 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
994}
995
Christoffer Dalld5d81842013-01-20 18:28:07 -0500996/**
997 * kvm_phys_addr_ioremap - map a device range to guest IPA
998 *
999 * @kvm: The KVM pointer
1000 * @guest_ipa: The IPA at which to insert the mapping
1001 * @pa: The physical address of the device
1002 * @size: The size of the mapping
1003 */
1004int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001005 phys_addr_t pa, unsigned long size, bool writable)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001006{
1007 phys_addr_t addr, end;
1008 int ret = 0;
1009 unsigned long pfn;
1010 struct kvm_mmu_memory_cache cache = { 0, };
1011
1012 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1013 pfn = __phys_to_pfn(pa);
1014
1015 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
Marc Zyngierc62ee2b2012-10-15 11:27:37 +01001016 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001017
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001018 if (writable)
Catalin Marinas06485052016-04-13 17:57:37 +01001019 pte = kvm_s2pte_mkwrite(pte);
Ard Biesheuvelc40f2f82014-09-17 14:56:18 -07001020
Christoffer Dall38f791a2014-10-10 12:14:28 +02001021 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
1022 KVM_NR_MEM_OBJS);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001023 if (ret)
1024 goto out;
1025 spin_lock(&kvm->mmu_lock);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001026 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1027 KVM_S2PTE_FLAG_IS_IOMAP);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001028 spin_unlock(&kvm->mmu_lock);
1029 if (ret)
1030 goto out;
1031
1032 pfn++;
1033 }
1034
1035out:
1036 mmu_free_memory_cache(&cache);
1037 return ret;
1038}
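
/*
 * Example (illustrative, with made-up variable names): the VGIC code uses
 * this helper to map the GIC virtual CPU interface into the guest's IPA
 * space.  Each page is mapped with PAGE_S2_DEVICE and, because 'writable'
 * is true, with the stage-2 write permission set:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, gic_cpu_if_ipa, gich_vcpu_phys,
 *				    KVM_VGIC_V2_CPU_SIZE, true);
 */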
1039
Dan Williamsba049e92016-01-15 16:56:11 -08001040static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001041{
Dan Williamsba049e92016-01-15 16:56:11 -08001042 kvm_pfn_t pfn = *pfnp;
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001043 gfn_t gfn = *ipap >> PAGE_SHIFT;
1044
Andrea Arcangeli127393f2016-05-05 16:22:20 -07001045 if (PageTransCompoundMap(pfn_to_page(pfn))) {
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001046 unsigned long mask;
1047 /*
1048 * The address we faulted on is backed by a transparent huge
1049 * page. However, because we map the compound huge page and
1050 * not the individual tail page, we need to transfer the
1051 * refcount to the head page. We have to be careful that the
1052 * THP doesn't start to split while we are adjusting the
1053 * refcounts.
1054 *
1055 * We are sure this doesn't happen, because mmu_notifier_retry
1056 * was successful and we are holding the mmu_lock, so if this
1057 * THP is trying to split, it will be blocked in the mmu
1058 * notifier before touching any of the pages, specifically
1059 * before being able to call __split_huge_page_refcount().
1060 *
1061 * We can therefore safely transfer the refcount from PG_tail
1062 * to PG_head and switch the pfn from a tail page to the head
1063 * page accordingly.
1064 */
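		/*
		 * Worked example (illustrative): with 4K pages and
		 * PTRS_PER_PMD == 512, mask == 0x1ff.  If the fault hit
		 * gfn 0x10203 backed by pfn 0x30403, both share offset
		 * 0x3 within their 2MB block, so *ipap is rounded down
		 * to 0x10200 << PAGE_SHIFT and the pfn is rewritten to
		 * the head page at 0x30400 before the caller maps the
		 * whole block with a huge PMD.
		 */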
1065 mask = PTRS_PER_PMD - 1;
1066 VM_BUG_ON((gfn & mask) != (pfn & mask));
1067 if (pfn & mask) {
1068 *ipap &= PMD_MASK;
1069 kvm_release_pfn_clean(pfn);
1070 pfn &= ~mask;
1071 kvm_get_pfn(pfn);
1072 *pfnp = pfn;
1073 }
1074
1075 return true;
1076 }
1077
1078 return false;
1079}
1080
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001081static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1082{
1083 if (kvm_vcpu_trap_is_iabt(vcpu))
1084 return false;
1085
1086 return kvm_vcpu_dabt_iswrite(vcpu);
1087}
1088
Mario Smarduchc6473552015-01-15 15:58:56 -08001089/**
1090 * stage2_wp_ptes - write protect PMD range
1091 * @pmd: pointer to pmd entry
1092 * @addr: range start address
1093 * @end: range end address
1094 */
1095static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1096{
1097 pte_t *pte;
1098
1099 pte = pte_offset_kernel(pmd, addr);
1100 do {
1101 if (!pte_none(*pte)) {
1102 if (!kvm_s2pte_readonly(pte))
1103 kvm_set_s2pte_readonly(pte);
1104 }
1105 } while (pte++, addr += PAGE_SIZE, addr != end);
1106}
1107
1108/**
1109 * stage2_wp_pmds - write protect PUD range
1110 * @pud: pointer to pud entry
1111 * @addr: range start address
1112 * @end: range end address
1113 */
1114static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
1115{
1116 pmd_t *pmd;
1117 phys_addr_t next;
1118
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001119 pmd = stage2_pmd_offset(pud, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001120
1121 do {
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001122 next = stage2_pmd_addr_end(addr, end);
Mario Smarduchc6473552015-01-15 15:58:56 -08001123 if (!pmd_none(*pmd)) {
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +00001124 if (pmd_thp_or_huge(*pmd)) {
Mario Smarduchc6473552015-01-15 15:58:56 -08001125 if (!kvm_s2pmd_readonly(pmd))
1126 kvm_set_s2pmd_readonly(pmd);
1127 } else {
1128 stage2_wp_ptes(pmd, addr, next);
1129 }
1130 }
1131 } while (pmd++, addr = next, addr != end);
1132}
1133
1134/**
1135 * stage2_wp_puds - write protect PGD range
1136 * @pgd: pointer to pgd entry
1137 * @addr: range start address
1138 * @end: range end address
1139 *
1140 * Process PUD entries; a huge PUD is unexpected here and triggers a BUG().
1141 */
1142static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
1143{
1144 pud_t *pud;
1145 phys_addr_t next;
1146
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001147 pud = stage2_pud_offset(pgd, addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001148 do {
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001149 next = stage2_pud_addr_end(addr, end);
1150 if (!stage2_pud_none(*pud)) {
Mario Smarduchc6473552015-01-15 15:58:56 -08001151 /* TODO:PUD not supported, revisit later if supported */
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001152 BUG_ON(stage2_pud_huge(*pud));
Mario Smarduchc6473552015-01-15 15:58:56 -08001153 stage2_wp_pmds(pud, addr, next);
1154 }
1155 } while (pud++, addr = next, addr != end);
1156}
1157
1158/**
1159 * stage2_wp_range() - write protect stage2 memory region range
1160 * @kvm: The KVM pointer
1161 * @addr: Start address of range
1162 * @end: End address of range
1163 */
1164static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1165{
1166 pgd_t *pgd;
1167 phys_addr_t next;
1168
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001169 pgd = kvm->arch.pgd + stage2_pgd_index(addr);
Mario Smarduchc6473552015-01-15 15:58:56 -08001170 do {
1171 /*
1172 * Release kvm_mmu_lock periodically if the memory region is
1173 * large. Otherwise, we may see kernel panics with
Christoffer Dall227ea812015-01-23 10:49:31 +01001174 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1175 * CONFIG_LOCKDEP. Additionally, holding the lock too long
Mario Smarduchc6473552015-01-15 15:58:56 -08001176 * will also starve other vCPUs.
1177 */
1178 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
1179 cond_resched_lock(&kvm->mmu_lock);
1180
Suzuki K Poulose70fd1902016-03-22 18:33:45 +00001181 next = stage2_pgd_addr_end(addr, end);
1182 if (stage2_pgd_present(*pgd))
Mario Smarduchc6473552015-01-15 15:58:56 -08001183 stage2_wp_puds(pgd, addr, next);
1184 } while (pgd++, addr = next, addr != end);
1185}
1186
1187/**
1188 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1189 * @kvm: The KVM pointer
1190 * @slot: The memory slot to write protect
1191 *
1192 * Called to start logging dirty pages after the memory region
1193 * KVM_MEM_LOG_DIRTY_PAGES operation is requested. When this function returns,
1194 * all present PMDs and PTEs in the memory region are write protected.
1195 * Afterwards, the dirty page log can be read.
1196 *
1197 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1198 * serializing operations for VM memory regions.
1199 */
1200void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1201{
Paolo Bonzini9f6b8022015-05-17 16:20:07 +02001202 struct kvm_memslots *slots = kvm_memslots(kvm);
1203 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
Mario Smarduchc6473552015-01-15 15:58:56 -08001204 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1205 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1206
1207 spin_lock(&kvm->mmu_lock);
1208 stage2_wp_range(kvm, start, end);
1209 spin_unlock(&kvm->mmu_lock);
1210 kvm_flush_remote_tlbs(kvm);
1211}
Mario Smarduch53c810c2015-01-15 15:58:57 -08001212
1213/**
Kai Huang3b0f1d02015-01-28 10:54:23 +08001214 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
Mario Smarduch53c810c2015-01-15 15:58:57 -08001215 * @kvm: The KVM pointer
1216 * @slot: The memory slot associated with mask
1217 * @gfn_offset: The gfn offset in memory slot
1218 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1219 * slot to be write protected
1220 *
1221 * Walks the bits set in mask and write protects the associated PTEs. Caller must
1222 * acquire kvm_mmu_lock.
1223 */
Kai Huang3b0f1d02015-01-28 10:54:23 +08001224static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
Mario Smarduch53c810c2015-01-15 15:58:57 -08001225 struct kvm_memory_slot *slot,
1226 gfn_t gfn_offset, unsigned long mask)
1227{
1228 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1229 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1230 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1231
1232 stage2_wp_range(kvm, start, end);
1233}
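
/*
 * Worked example (illustrative): with gfn_offset == 0x40 and mask == 0x0c
 * (bits 2 and 3 set), __ffs(mask) == 2 and __fls(mask) == 3, so the two
 * pages at gfns slot->base_gfn + 0x42 and slot->base_gfn + 0x43 (and only
 * those) are write protected.
 */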
Mario Smarduchc6473552015-01-15 15:58:56 -08001234
Kai Huang3b0f1d02015-01-28 10:54:23 +08001235/*
1236 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1237 * dirty pages.
1238 *
1239 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1240 * enable dirty logging for them.
1241 */
1242void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1243 struct kvm_memory_slot *slot,
1244 gfn_t gfn_offset, unsigned long mask)
1245{
1246 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1247}
1248
Dan Williamsba049e92016-01-15 16:56:11 -08001249static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001250 unsigned long size, bool uncached)
1251{
1252 __coherent_cache_guest_page(vcpu, pfn, size, uncached);
1253}
1254
Christoffer Dall94f8e642013-01-20 18:28:12 -05001255static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
Christoffer Dall98047882014-08-19 12:18:04 +02001256 struct kvm_memory_slot *memslot, unsigned long hva,
Christoffer Dall94f8e642013-01-20 18:28:12 -05001257 unsigned long fault_status)
1258{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001259 int ret;
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001260 bool write_fault, writable, hugetlb = false, force_pte = false;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001261 unsigned long mmu_seq;
Christoffer Dallad361f02012-11-01 17:14:45 +01001262 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dallad361f02012-11-01 17:14:45 +01001263 struct kvm *kvm = vcpu->kvm;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001264 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
Christoffer Dallad361f02012-11-01 17:14:45 +01001265 struct vm_area_struct *vma;
Dan Williamsba049e92016-01-15 16:56:11 -08001266 kvm_pfn_t pfn;
Kim Phillipsb8865762014-06-26 01:45:51 +01001267 pgprot_t mem_type = PAGE_S2;
Laszlo Ersek840f4bf2014-11-17 14:58:52 +00001268 bool fault_ipa_uncached;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001269 bool logging_active = memslot_is_logging(memslot);
1270 unsigned long flags = 0;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001271
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001272 write_fault = kvm_is_write_fault(vcpu);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001273 if (fault_status == FSC_PERM && !write_fault) {
1274 kvm_err("Unexpected L2 read permission error\n");
1275 return -EFAULT;
1276 }
1277
Christoffer Dallad361f02012-11-01 17:14:45 +01001278 /* Let's check if we will get back a huge page backed by hugetlbfs */
1279 down_read(&current->mm->mmap_sem);
1280 vma = find_vma_intersection(current->mm, hva, hva + 1);
Ard Biesheuvel37b54402014-09-17 14:56:17 -07001281 if (unlikely(!vma)) {
1282 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1283 up_read(&current->mm->mmap_sem);
1284 return -EFAULT;
1285 }
1286
Punit Agrawal45ee9d52018-01-04 18:24:33 +00001287 if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
Christoffer Dallad361f02012-11-01 17:14:45 +01001288 hugetlb = true;
1289 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001290 } else {
1291 /*
Marc Zyngier136d7372013-12-13 16:56:06 +00001292 * Pages belonging to memslots that don't have the same
1293 * alignment for userspace and IPA cannot be mapped using
1294 * block descriptors even if the pages belong to a THP for
1295 * the process, because the stage-2 block descriptor will
1296 * cover more than a single THP and we lose atomicity for
1297 * unmapping, updates, and splits of the THP or other pages
1298 * in the stage-2 block range.
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001299 */
Marc Zyngier136d7372013-12-13 16:56:06 +00001300 if ((memslot->userspace_addr & ~PMD_MASK) !=
1301 ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001302 force_pte = true;
Christoffer Dallad361f02012-11-01 17:14:45 +01001303 }
1304 up_read(&current->mm->mmap_sem);
1305
Christoffer Dall94f8e642013-01-20 18:28:12 -05001306 /* We need minimum second+third level pages */
Christoffer Dall38f791a2014-10-10 12:14:28 +02001307 ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
1308 KVM_NR_MEM_OBJS);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001309 if (ret)
1310 return ret;
1311
1312 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1313 /*
1314 * Ensure the read of mmu_notifier_seq happens before we call
1315 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1316 * the page we just got a reference to getting unmapped before we have a
1317 * chance to grab the mmu_lock, which ensures that if the page gets
1318 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1319 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1320 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1321 */
1322 smp_rmb();
1323
Christoffer Dallad361f02012-11-01 17:14:45 +01001324 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
Christoffer Dall9ac71592016-08-17 10:46:10 +02001325 if (is_error_noslot_pfn(pfn))
Christoffer Dall94f8e642013-01-20 18:28:12 -05001326 return -EFAULT;
1327
Mario Smarduch15a49a42015-01-15 15:58:58 -08001328 if (kvm_is_device_pfn(pfn)) {
Kim Phillipsb8865762014-06-26 01:45:51 +01001329 mem_type = PAGE_S2_DEVICE;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001330 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1331 } else if (logging_active) {
1332 /*
 1333	 * Pages in a memslot with logging enabled should not be mapped
 1334	 * with huge pages (that would introduce churn and performance
 1335	 * degradation), so force a pte mapping.
1336 */
1337 force_pte = true;
1338 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1339
1340 /*
1341 * Only actually map the page as writable if this was a write
1342 * fault.
1343 */
1344 if (!write_fault)
1345 writable = false;
1346 }
Kim Phillipsb8865762014-06-26 01:45:51 +01001347
Christoffer Dallad361f02012-11-01 17:14:45 +01001348 spin_lock(&kvm->mmu_lock);
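	/*
	 * If an MMU notifier invalidation has run since we sampled
	 * mmu_notifier_seq above, the pfn we hold may already be stale;
	 * bail out and let the guest retry the access.
	 */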
1349 if (mmu_notifier_retry(kvm, mmu_seq))
Christoffer Dall94f8e642013-01-20 18:28:12 -05001350 goto out_unlock;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001351
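	/*
	 * If the page turns out to be part of a suitably aligned THP,
	 * transparent_hugepage_adjust() rewrites pfn and fault_ipa down to
	 * the PMD-aligned head of the block so a block mapping can be used.
	 */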
Christoffer Dall9b5fdb92013-10-02 15:32:01 -07001352 if (!hugetlb && !force_pte)
1353 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
Christoffer Dallad361f02012-11-01 17:14:45 +01001354
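	/*
	 * Read-only memslots are tagged KVM_MEMSLOT_INCOHERENT (see
	 * kvm_arch_create_memslot() below); the guest may map such regions
	 * uncached, so extra cache maintenance is needed when populating them.
	 */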
Ard Biesheuvel849260c2014-11-17 14:58:53 +00001355 fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
Laszlo Ersek840f4bf2014-11-17 14:58:52 +00001356
Christoffer Dallad361f02012-11-01 17:14:45 +01001357 if (hugetlb) {
Kim Phillipsb8865762014-06-26 01:45:51 +01001358 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
Christoffer Dallad361f02012-11-01 17:14:45 +01001359 new_pmd = pmd_mkhuge(new_pmd);
1360 if (writable) {
Catalin Marinas06485052016-04-13 17:57:37 +01001361 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
Christoffer Dallad361f02012-11-01 17:14:45 +01001362 kvm_set_pfn_dirty(pfn);
1363 }
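		/*
		 * Perform the cache maintenance needed for the guest to see
		 * the contents of the whole block before installing the PMD.
		 */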
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001364 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
Christoffer Dallad361f02012-11-01 17:14:45 +01001365 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1366 } else {
Kim Phillipsb8865762014-06-26 01:45:51 +01001367 pte_t new_pte = pfn_pte(pfn, mem_type);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001368
Christoffer Dallad361f02012-11-01 17:14:45 +01001369 if (writable) {
Catalin Marinas06485052016-04-13 17:57:37 +01001370 new_pte = kvm_s2pte_mkwrite(new_pte);
Christoffer Dallad361f02012-11-01 17:14:45 +01001371 kvm_set_pfn_dirty(pfn);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001372 mark_page_dirty(kvm, gfn);
Christoffer Dallad361f02012-11-01 17:14:45 +01001373 }
Marc Zyngier0d3e4d42015-01-05 21:13:24 +00001374 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
Mario Smarduch15a49a42015-01-15 15:58:58 -08001375 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001376 }
Christoffer Dallad361f02012-11-01 17:14:45 +01001377
Christoffer Dall94f8e642013-01-20 18:28:12 -05001378out_unlock:
Christoffer Dallad361f02012-11-01 17:14:45 +01001379 spin_unlock(&kvm->mmu_lock);
Marc Zyngier35307b92015-03-12 18:16:51 +00001380 kvm_set_pfn_accessed(pfn);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001381 kvm_release_pfn_clean(pfn);
Christoffer Dallad361f02012-11-01 17:14:45 +01001382 return ret;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001383}
1384
Marc Zyngieraeda9132015-03-12 18:16:52 +00001385/*
1386 * Resolve the access fault by making the page young again.
1387 * Note that because the faulting entry is guaranteed not to be
1388 * cached in the TLB, we don't need to invalidate anything.
Catalin Marinas06485052016-04-13 17:57:37 +01001389 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1390 * so there is no need for atomic (pte|pmd)_mkyoung operations.
Marc Zyngieraeda9132015-03-12 18:16:52 +00001391 */
1392static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1393{
1394 pmd_t *pmd;
1395 pte_t *pte;
Dan Williamsba049e92016-01-15 16:56:11 -08001396 kvm_pfn_t pfn;
Marc Zyngieraeda9132015-03-12 18:16:52 +00001397 bool pfn_valid = false;
1398
1399 trace_kvm_access_fault(fault_ipa);
1400
1401 spin_lock(&vcpu->kvm->mmu_lock);
1402
1403 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
1404 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1405 goto out;
1406
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +00001407 if (pmd_thp_or_huge(*pmd)) { /* THP, HugeTLB */
Marc Zyngieraeda9132015-03-12 18:16:52 +00001408 *pmd = pmd_mkyoung(*pmd);
1409 pfn = pmd_pfn(*pmd);
1410 pfn_valid = true;
1411 goto out;
1412 }
1413
1414 pte = pte_offset_kernel(pmd, fault_ipa);
1415 if (pte_none(*pte)) /* Nothing there either */
1416 goto out;
1417
1418 *pte = pte_mkyoung(*pte); /* Just a page... */
1419 pfn = pte_pfn(*pte);
1420 pfn_valid = true;
1421out:
1422 spin_unlock(&vcpu->kvm->mmu_lock);
1423 if (pfn_valid)
1424 kvm_set_pfn_accessed(pfn);
1425}
1426
Christoffer Dall94f8e642013-01-20 18:28:12 -05001427/**
1428 * kvm_handle_guest_abort - handles all 2nd stage aborts
1429 * @vcpu: the VCPU pointer
1430 * @run: the kvm_run structure
1431 *
1432 * Any abort that gets to the host is almost guaranteed to be caused by a
 1433 * missing second stage translation table entry, which can mean either that the
 1434 * guest simply needs more memory and we must allocate an appropriate page, or
 1435 * that the guest tried to access I/O memory, which is emulated by user
1436 * space. The distinction is based on the IPA causing the fault and whether this
1437 * memory region has been registered as standard RAM by user space.
1438 */
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001439int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1440{
Christoffer Dall94f8e642013-01-20 18:28:12 -05001441 unsigned long fault_status;
1442 phys_addr_t fault_ipa;
1443 struct kvm_memory_slot *memslot;
Christoffer Dall98047882014-08-19 12:18:04 +02001444 unsigned long hva;
1445 bool is_iabt, write_fault, writable;
Christoffer Dall94f8e642013-01-20 18:28:12 -05001446 gfn_t gfn;
1447 int ret, idx;
1448
Marc Zyngier52d1dba2012-10-15 10:33:38 +01001449 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
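	/*
	 * Synchronous external aborts on data accesses are not something we
	 * can fix up here; reflect them back to the guest as a virtual abort.
	 */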
Marc Zyngier40557102016-09-06 14:02:15 +01001450 if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
1451 kvm_inject_vabt(vcpu);
1452 return 1;
1453 }
1454
Marc Zyngier7393b592012-09-17 19:27:09 +01001455 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001456
Marc Zyngier7393b592012-09-17 19:27:09 +01001457 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1458 kvm_vcpu_get_hfar(vcpu), fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001459
 1460	/* Check that the stage-2 fault is a translation, permission or access fault */
Christoffer Dall0496daa52014-09-26 12:29:34 +02001461 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
Marc Zyngier35307b92015-03-12 18:16:51 +00001462 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1463 fault_status != FSC_ACCESS) {
Christoffer Dall0496daa52014-09-26 12:29:34 +02001464 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1465 kvm_vcpu_trap_get_class(vcpu),
1466 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1467 (unsigned long)kvm_vcpu_get_hsr(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001468 return -EFAULT;
1469 }
1470
1471 idx = srcu_read_lock(&vcpu->kvm->srcu);
1472
1473 gfn = fault_ipa >> PAGE_SHIFT;
Christoffer Dall98047882014-08-19 12:18:04 +02001474 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1475 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
Ard Biesheuvela7d079c2014-09-09 11:27:09 +01001476 write_fault = kvm_is_write_fault(vcpu);
Christoffer Dall98047882014-08-19 12:18:04 +02001477 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
Christoffer Dall94f8e642013-01-20 18:28:12 -05001478 if (is_iabt) {
1479 /* Prefetch Abort on I/O address */
Marc Zyngier7393b592012-09-17 19:27:09 +01001480 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
Christoffer Dall94f8e642013-01-20 18:28:12 -05001481 ret = 1;
1482 goto out_unlock;
1483 }
1484
Marc Zyngiercfe39502012-12-12 14:42:09 +00001485 /*
Marc Zyngier57c841f2016-01-29 15:01:28 +00001486 * Check for a cache maintenance operation. Since we
1487 * ended-up here, we know it is outside of any memory
1488 * slot. But we can't find out if that is for a device,
1489 * or if the guest is just being stupid. The only thing
1490 * we know for sure is that this range cannot be cached.
1491 *
1492 * So let's assume that the guest is just being
1493 * cautious, and skip the instruction.
1494 */
1495 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1496 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1497 ret = 1;
1498 goto out_unlock;
1499 }
1500
1501 /*
Marc Zyngiercfe39502012-12-12 14:42:09 +00001502 * The IPA is reported as [MAX:12], so we need to
1503 * complement it with the bottom 12 bits from the
1504 * faulting VA. This is always 12 bits, irrespective
1505 * of the page size.
1506 */
1507 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
Christoffer Dall45e96ea2013-01-20 18:43:58 -05001508 ret = io_mem_abort(vcpu, run, fault_ipa);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001509 goto out_unlock;
1510 }
1511
Christoffer Dallc3058d52014-10-10 12:14:29 +02001512 /* Userspace should not be able to register out-of-bounds IPAs */
1513 VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1514
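	/*
	 * An access-flag fault only needs the AF bit set again; see
	 * handle_access_fault() above.
	 */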
Marc Zyngieraeda9132015-03-12 18:16:52 +00001515 if (fault_status == FSC_ACCESS) {
1516 handle_access_fault(vcpu, fault_ipa);
1517 ret = 1;
1518 goto out_unlock;
1519 }
1520
Christoffer Dall98047882014-08-19 12:18:04 +02001521 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
Christoffer Dall94f8e642013-01-20 18:28:12 -05001522 if (ret == 0)
1523 ret = 1;
1524out_unlock:
1525 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1526 return ret;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001527}
1528
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001529static int handle_hva_to_gpa(struct kvm *kvm,
1530 unsigned long start,
1531 unsigned long end,
1532 int (*handler)(struct kvm *kvm,
1533 gpa_t gpa, void *data),
1534 void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001535{
1536 struct kvm_memslots *slots;
1537 struct kvm_memory_slot *memslot;
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001538 int ret = 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001539
1540 slots = kvm_memslots(kvm);
1541
1542 /* we only care about the pages that the guest sees */
1543 kvm_for_each_memslot(memslot, slots) {
1544 unsigned long hva_start, hva_end;
1545 gfn_t gfn, gfn_end;
1546
1547 hva_start = max(start, memslot->userspace_addr);
1548 hva_end = min(end, memslot->userspace_addr +
1549 (memslot->npages << PAGE_SHIFT));
1550 if (hva_start >= hva_end)
1551 continue;
1552
1553 /*
1554 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1555 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
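		 * Rounding hva_end up by PAGE_SIZE - 1 below makes sure a
		 * partially covered last page is included in the walk.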
1556 */
1557 gfn = hva_to_gfn_memslot(hva_start, memslot);
1558 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1559
1560 for (; gfn < gfn_end; ++gfn) {
1561 gpa_t gpa = gfn << PAGE_SHIFT;
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001562 ret |= handler(kvm, gpa, data);
Christoffer Dalld5d81842013-01-20 18:28:07 -05001563 }
1564 }
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001565
1566 return ret;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001567}
1568
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001569static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001570{
1571 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001572 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001573}
1574
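/*
 * MMU notifier callbacks: the host is changing or tearing down this hva
 * mapping, so drop any stage-2 mappings that were derived from it.
 */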
1575int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1576{
1577 unsigned long end = hva + PAGE_SIZE;
1578
1579 if (!kvm->arch.pgd)
1580 return 0;
1581
1582 trace_kvm_unmap_hva(hva);
1583 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1584 return 0;
1585}
1586
1587int kvm_unmap_hva_range(struct kvm *kvm,
1588 unsigned long start, unsigned long end)
1589{
1590 if (!kvm->arch.pgd)
1591 return 0;
1592
1593 trace_kvm_unmap_hva_range(start, end);
1594 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1595 return 0;
1596}
1597
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001598static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
Christoffer Dalld5d81842013-01-20 18:28:07 -05001599{
1600 pte_t *pte = (pte_t *)data;
1601
Mario Smarduch15a49a42015-01-15 15:58:58 -08001602 /*
1603 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
1604 * flag clear because MMU notifiers will have unmapped a huge PMD before
1605 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
1606 * therefore stage2_set_pte() never needs to clear out a huge PMD
1607 * through this calling path.
1608 */
1609 stage2_set_pte(kvm, NULL, gpa, pte, 0);
Marc Zyngier1d2ebac2015-03-12 18:16:50 +00001610 return 0;
Christoffer Dalld5d81842013-01-20 18:28:07 -05001611}
1612
1613
1614void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1615{
1616 unsigned long end = hva + PAGE_SIZE;
1617 pte_t stage2_pte;
1618
1619 if (!kvm->arch.pgd)
1620 return;
1621
1622 trace_kvm_set_spte_hva(hva);
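	/*
	 * Rebuild a stage-2 descriptor for the new host page and install it
	 * at every guest physical address that maps this hva.
	 */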
1623 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1624 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1625}
1626
Marc Zyngier35307b92015-03-12 18:16:51 +00001627static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1628{
1629 pmd_t *pmd;
1630 pte_t *pte;
1631
1632 pmd = stage2_get_pmd(kvm, NULL, gpa);
1633 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1634 return 0;
1635
Catalin Marinas06485052016-04-13 17:57:37 +01001636 if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
1637 return stage2_pmdp_test_and_clear_young(pmd);
Marc Zyngier35307b92015-03-12 18:16:51 +00001638
1639 pte = pte_offset_kernel(pmd, gpa);
1640 if (pte_none(*pte))
1641 return 0;
1642
Catalin Marinas06485052016-04-13 17:57:37 +01001643 return stage2_ptep_test_and_clear_young(pte);
Marc Zyngier35307b92015-03-12 18:16:51 +00001644}
1645
1646static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1647{
1648 pmd_t *pmd;
1649 pte_t *pte;
1650
1651 pmd = stage2_get_pmd(kvm, NULL, gpa);
1652 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1653 return 0;
1654
Suzuki K Poulosebbb3b6b2016-03-01 12:00:39 +00001655 if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
Marc Zyngier35307b92015-03-12 18:16:51 +00001656 return pmd_young(*pmd);
1657
1658 pte = pte_offset_kernel(pmd, gpa);
1659 if (!pte_none(*pte)) /* Just a page... */
1660 return pte_young(*pte);
1661
1662 return 0;
1663}
1664
1665int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1666{
Suzuki K Poulose3f0075c2017-07-05 09:57:00 +01001667 if (!kvm->arch.pgd)
1668 return 0;
Marc Zyngier35307b92015-03-12 18:16:51 +00001669 trace_kvm_age_hva(start, end);
1670 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1671}
1672
1673int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1674{
Suzuki K Poulose3f0075c2017-07-05 09:57:00 +01001675 if (!kvm->arch.pgd)
1676 return 0;
Marc Zyngier35307b92015-03-12 18:16:51 +00001677 trace_kvm_test_age_hva(hva);
 1678	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, kvm_test_age_hva_handler, NULL);
1679}
1680
Christoffer Dalld5d81842013-01-20 18:28:07 -05001681void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1682{
1683 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1684}
1685
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001686phys_addr_t kvm_mmu_get_httbr(void)
1687{
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00001688 if (__kvm_cpu_uses_extended_idmap())
1689 return virt_to_phys(merged_hyp_pgd);
1690 else
1691 return virt_to_phys(hyp_pgd);
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001692}
1693
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001694phys_addr_t kvm_get_idmap_vector(void)
1695{
1696 return hyp_idmap_vector;
1697}
1698
AKASHI Takahiro67f69192016-04-27 17:47:05 +01001699phys_addr_t kvm_get_idmap_start(void)
1700{
1701 return hyp_idmap_start;
1702}
1703
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001704static int kvm_map_idmap_text(pgd_t *pgd)
1705{
1706 int err;
1707
1708 /* Create the idmap in the boot page tables */
1709 err = __create_hyp_mappings(pgd,
1710 hyp_idmap_start, hyp_idmap_end,
1711 __phys_to_pfn(hyp_idmap_start),
1712 PAGE_HYP_EXEC);
1713 if (err)
1714 kvm_err("Failed to idmap %lx-%lx\n",
1715 hyp_idmap_start, hyp_idmap_end);
1716
1717 return err;
1718}
1719
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001720int kvm_mmu_init(void)
1721{
Marc Zyngier2fb41052013-04-12 19:12:03 +01001722 int err;
1723
Santosh Shilimkar4fda3422013-11-19 14:59:12 -05001724 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1725 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1726 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001727
Ard Biesheuvel06f75a12015-03-19 16:42:26 +00001728 /*
1729 * We rely on the linker script to ensure at build time that the HYP
1730 * init code does not cross a page boundary.
1731 */
1732 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001733
Marc Zyngiereac378a2016-06-30 18:40:50 +01001734 kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
1735 kvm_info("HYP VA range: %lx:%lx\n",
Marc Zyngier6c41a412016-06-30 18:40:51 +01001736 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
Marc Zyngiereac378a2016-06-30 18:40:50 +01001737
Marc Zyngier6c41a412016-06-30 18:40:51 +01001738 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
Marc Zyngierd2896d42016-08-22 09:01:17 +01001739 hyp_idmap_start < kern_hyp_va(~0UL) &&
1740 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
Marc Zyngiereac378a2016-06-30 18:40:50 +01001741 /*
 1742		 * The idmap page intersects with the HYP VA space;
 1743		 * it is not safe to continue further.
1744 */
1745 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1746 err = -EINVAL;
1747 goto out;
1748 }
1749
Christoffer Dall38f791a2014-10-10 12:14:28 +02001750 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001751 if (!hyp_pgd) {
Christoffer Dalld5d81842013-01-20 18:28:07 -05001752 kvm_err("Hyp mode PGD not allocated\n");
Marc Zyngier2fb41052013-04-12 19:12:03 +01001753 err = -ENOMEM;
1754 goto out;
1755 }
1756
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00001757 if (__kvm_cpu_uses_extended_idmap()) {
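		/*
		 * On CPUs where the idmap address cannot be reached through
		 * the regular HYP page tables, keep the idmap in a separate
		 * boot PGD and combine both via __kvm_extend_hypmap().
		 */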
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001758 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1759 hyp_pgd_order);
1760 if (!boot_hyp_pgd) {
1761 kvm_err("Hyp boot PGD not allocated\n");
1762 err = -ENOMEM;
1763 goto out;
1764 }
1765
1766 err = kvm_map_idmap_text(boot_hyp_pgd);
1767 if (err)
1768 goto out;
1769
Ard Biesheuvele4c5a682015-03-19 16:42:28 +00001770 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1771 if (!merged_hyp_pgd) {
 1772			kvm_err("Failed to allocate extra HYP pgd\n");
			err = -ENOMEM;
 1773			goto out;
1774 }
1775 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
1776 hyp_idmap_start);
Marc Zyngier0535a3e2016-06-30 18:40:43 +01001777 } else {
1778 err = kvm_map_idmap_text(hyp_pgd);
1779 if (err)
1780 goto out;
Marc Zyngier5a677ce2013-04-12 19:12:06 +01001781 }
1782
Christoffer Dalld5d81842013-01-20 18:28:07 -05001783 return 0;
Marc Zyngier2fb41052013-04-12 19:12:03 +01001784out:
Marc Zyngier4f728272013-04-12 19:12:05 +01001785 free_hyp_pgds();
Marc Zyngier2fb41052013-04-12 19:12:03 +01001786 return err;
Christoffer Dall342cd0a2013-01-20 18:28:06 -05001787}
Eric Augerdf6ce242014-06-06 11:10:23 +02001788
1789void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02001790 const struct kvm_userspace_memory_region *mem,
Eric Augerdf6ce242014-06-06 11:10:23 +02001791 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02001792 const struct kvm_memory_slot *new,
Eric Augerdf6ce242014-06-06 11:10:23 +02001793 enum kvm_mr_change change)
1794{
Mario Smarduchc6473552015-01-15 15:58:56 -08001795 /*
1796 * At this point memslot has been committed and there is an
 1797	 * allocated dirty_bitmap[], dirty pages will be tracked while the
1798 * memory slot is write protected.
1799 */
1800 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
1801 kvm_mmu_wp_memory_region(kvm, mem->slot);
Eric Augerdf6ce242014-06-06 11:10:23 +02001802}
1803
1804int kvm_arch_prepare_memory_region(struct kvm *kvm,
1805 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02001806 const struct kvm_userspace_memory_region *mem,
Eric Augerdf6ce242014-06-06 11:10:23 +02001807 enum kvm_mr_change change)
1808{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001809 hva_t hva = mem->userspace_addr;
1810 hva_t reg_end = hva + mem->memory_size;
1811 bool writable = !(mem->flags & KVM_MEM_READONLY);
1812 int ret = 0;
1813
Mario Smarduch15a49a42015-01-15 15:58:58 -08001814 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1815 change != KVM_MR_FLAGS_ONLY)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001816 return 0;
1817
1818 /*
Christoffer Dallc3058d52014-10-10 12:14:29 +02001819 * Prevent userspace from creating a memory region outside of the IPA
 1820	 * space addressable by the KVM guest.
1821 */
1822 if (memslot->base_gfn + memslot->npages >=
1823 (KVM_PHYS_SIZE >> PAGE_SHIFT))
1824 return -EFAULT;
1825
Marc Zyngiera1ea3182017-03-16 18:20:50 +00001826 down_read(&current->mm->mmap_sem);
Christoffer Dallc3058d52014-10-10 12:14:29 +02001827 /*
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001828 * A memory region could potentially cover multiple VMAs, and any holes
1829 * between them, so iterate over all of them to find out if we can map
1830 * any of them right now.
1831 *
1832 * +--------------------------------------------+
1833 * +---------------+----------------+ +----------------+
1834 * | : VMA 1 | VMA 2 | | VMA 3 : |
1835 * +---------------+----------------+ +----------------+
1836 * | memory region |
1837 * +--------------------------------------------+
1838 */
1839 do {
1840 struct vm_area_struct *vma = find_vma(current->mm, hva);
1841 hva_t vm_start, vm_end;
1842
1843 if (!vma || vma->vm_start >= reg_end)
1844 break;
1845
1846 /*
1847 * Mapping a read-only VMA is only allowed if the
1848 * memory region is configured as read-only.
1849 */
1850 if (writable && !(vma->vm_flags & VM_WRITE)) {
1851 ret = -EPERM;
1852 break;
1853 }
1854
1855 /*
1856 * Take the intersection of this VMA with the memory region
1857 */
1858 vm_start = max(hva, vma->vm_start);
1859 vm_end = min(reg_end, vma->vm_end);
1860
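		/*
		 * VM_PFNMAP VMAs (e.g. device memory mapped by a host driver)
		 * have no struct page backing, so map them into stage 2 up
		 * front as device memory instead of faulting them in later.
		 */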
1861 if (vma->vm_flags & VM_PFNMAP) {
1862 gpa_t gpa = mem->guest_phys_addr +
1863 (vm_start - mem->userspace_addr);
Marek Majtykaca09f022015-09-16 12:04:55 +02001864 phys_addr_t pa;
1865
1866 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1867 pa += vm_start - vma->vm_start;
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001868
Mario Smarduch15a49a42015-01-15 15:58:58 -08001869 /* IO region dirty page logging not allowed */
Marc Zyngiera1ea3182017-03-16 18:20:50 +00001870 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1871 ret = -EINVAL;
1872 goto out;
1873 }
Mario Smarduch15a49a42015-01-15 15:58:58 -08001874
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001875 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1876 vm_end - vm_start,
1877 writable);
1878 if (ret)
1879 break;
1880 }
1881 hva = vm_end;
1882 } while (hva < reg_end);
1883
Mario Smarduch15a49a42015-01-15 15:58:58 -08001884 if (change == KVM_MR_FLAGS_ONLY)
Marc Zyngiera1ea3182017-03-16 18:20:50 +00001885 goto out;
Mario Smarduch15a49a42015-01-15 15:58:58 -08001886
Ard Biesheuvel849260c2014-11-17 14:58:53 +00001887 spin_lock(&kvm->mmu_lock);
1888 if (ret)
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001889 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
Ard Biesheuvel849260c2014-11-17 14:58:53 +00001890 else
1891 stage2_flush_memslot(kvm, memslot);
1892 spin_unlock(&kvm->mmu_lock);
Marc Zyngiera1ea3182017-03-16 18:20:50 +00001893out:
1894 up_read(&current->mm->mmap_sem);
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001895 return ret;
Eric Augerdf6ce242014-06-06 11:10:23 +02001896}
1897
1898void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1899 struct kvm_memory_slot *dont)
1900{
1901}
1902
1903int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1904 unsigned long npages)
1905{
Ard Biesheuvel849260c2014-11-17 14:58:53 +00001906 /*
1907 * Readonly memslots are not incoherent with the caches by definition,
1908 * but in practice, they are used mostly to emulate ROMs or NOR flashes
1909 * that the guest may consider devices and hence map as uncached.
1910 * To prevent incoherency issues in these cases, tag all readonly
1911 * regions as incoherent.
1912 */
1913 if (slot->flags & KVM_MEM_READONLY)
1914 slot->flags |= KVM_MEMSLOT_INCOHERENT;
Eric Augerdf6ce242014-06-06 11:10:23 +02001915 return 0;
1916}
1917
Paolo Bonzini15f46012015-05-17 21:26:08 +02001918void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
Eric Augerdf6ce242014-06-06 11:10:23 +02001919{
1920}
1921
1922void kvm_arch_flush_shadow_all(struct kvm *kvm)
1923{
Suzuki K Poulose293f2932016-09-08 16:25:49 +01001924 kvm_free_stage2_pgd(kvm);
Eric Augerdf6ce242014-06-06 11:10:23 +02001925}
1926
1927void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1928 struct kvm_memory_slot *slot)
1929{
Ard Biesheuvel8eef9122014-10-10 17:00:32 +02001930 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1931 phys_addr_t size = slot->npages << PAGE_SHIFT;
1932
1933 spin_lock(&kvm->mmu_lock);
1934 unmap_stage2_range(kvm, gpa, size);
1935 spin_unlock(&kvm->mmu_lock);
Eric Augerdf6ce242014-06-06 11:10:23 +02001936}
Marc Zyngier3c1e7162014-12-19 16:05:31 +00001937
1938/*
1939 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1940 *
1941 * Main problems:
1942 * - S/W ops are local to a CPU (not broadcast)
1943 * - We have line migration behind our back (speculation)
1944 * - System caches don't support S/W at all (damn!)
1945 *
1946 * In the face of the above, the best we can do is to try and convert
1947 * S/W ops to VA ops. Because the guest is not allowed to infer the
1948 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
1949 * which is a rather good thing for us.
1950 *
1951 * Also, it is only used when turning caches on/off ("The expected
1952 * usage of the cache maintenance instructions that operate by set/way
1953 * is associated with the cache maintenance instructions associated
1954 * with the powerdown and powerup of caches, if this is required by
1955 * the implementation.").
1956 *
1957 * We use the following policy:
1958 *
1959 * - If we trap a S/W operation, we enable VM trapping to detect
1960 * caches being turned on/off, and do a full clean.
1961 *
1962 * - We flush the caches on both caches being turned on and off.
1963 *
1964 * - Once the caches are enabled, we stop trapping VM ops.
1965 */
1966void kvm_set_way_flush(struct kvm_vcpu *vcpu)
1967{
1968 unsigned long hcr = vcpu_get_hcr(vcpu);
1969
1970 /*
1971 * If this is the first time we do a S/W operation
1972 * (i.e. HCR_TVM not set) flush the whole memory, and set the
1973 * VM trapping.
1974 *
1975 * Otherwise, rely on the VM trapping to wait for the MMU +
1976 * Caches to be turned off. At that point, we'll be able to
1977 * clean the caches again.
1978 */
1979 if (!(hcr & HCR_TVM)) {
1980 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
1981 vcpu_has_cache_enabled(vcpu));
1982 stage2_flush_vm(vcpu->kvm);
1983 vcpu_set_hcr(vcpu, hcr | HCR_TVM);
1984 }
1985}
1986
1987void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
1988{
1989 bool now_enabled = vcpu_has_cache_enabled(vcpu);
1990
1991 /*
1992 * If switching the MMU+caches on, need to invalidate the caches.
1993 * If switching it off, need to clean the caches.
1994 * Clean + invalidate does the trick always.
1995 */
1996 if (now_enabled != was_enabled)
1997 stage2_flush_vm(vcpu->kvm);
1998
1999 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2000 if (now_enabled)
2001 vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
2002
2003 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2004}