/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
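
/*
 * For illustration only, a minimal sketch of the pfn -> mfn direction
 * described above; the p2m[] array is a hypothetical stand-in for the
 * real p2m lookup machinery (see pte_pfn_to_mfn() below for the real
 * thing):
 *
 *	pteval_t example_pfn_pte_to_mfn_pte(pteval_t val)
 *	{
 *		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 *
 *		return ((pteval_t)p2m[pfn] << PAGE_SHIFT) |
 *		       (val & PTE_FLAGS_MASK);
 *	}
 */
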
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

static phys_addr_t xen_pt_base, xen_pt_size __initdata;

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
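
/*
 * For illustration, the typical use of the helper above is building a
 * hypercall argument from a pointer that may live outside the linear
 * map (vmalloc or ioremap space), as xen_set_pmd_hyper() does below:
 *
 *	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 */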

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}
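
/*
 * For illustration, the batching idiom used throughout this file: open
 * a batch, queue one or more updates, then issue them (lazily, if the
 * vcpu is in lazy MMU mode):
 *
 *	struct mmu_update u;
 *
 *	xen_mc_batch();
 *	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
 *	u.val = pte_val_ma(pteval);
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */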

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}
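
/*
 * For illustration, mapping a single machine frame at a kernel virtual
 * address with ordinary kernel protections (vaddr and mfn assumed to
 * come from the caller):
 *
 *	set_pte_mfn(vaddr, mfn, PAGE_KERNEL);
 */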

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write, but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = __pfn_to_mfn(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
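
/*
 * For illustration, the asymmetry documented above: a pfn with no mfn
 * behind it converts to an empty non-present pte, so a round trip does
 * not recover the original pfn:
 *
 *	pteval_t v = pte_pfn_to_mfn(((pteval_t)pfn << PAGE_SHIFT) |
 *				    _PAGE_PRESENT);
 *
 * Here v is 0 if the pfn had no p2m entry, and pte_mfn_to_pfn(v)
 * yields 0 rather than the original pfn.
 */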

__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}
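
/*
 * For illustration, the usual caller pattern: look up the shadow user
 * pagetable and, when one exists, apply the same operation to it as to
 * the kernel pagetable (compare __xen_pgd_pin() below):
 *
 *	pgd_t *user_pgd = xen_get_user_pgd(pgd);
 *
 *	if (user_pgd)
 *		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
 */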

static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = p4d_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
	pgd_t pgd_val;

	trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			pgd_val.pgd = p4d_val_ma(val);
			*user_ptr = pgd_val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);
	if (user_ptr)
		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif /* CONFIG_PGTABLE_LEVELS == 4 */

static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
		bool last, unsigned long limit)
{
	int i, nr, flush = 0;

	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
	for (i = 0; i < nr; i++) {
		if (!pmd_none(pmd[i]))
			flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
	}
	return flush;
}

static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
		bool last, unsigned long limit)
{
	int i, nr, flush = 0;

	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
	for (i = 0; i < nr; i++) {
		pmd_t *pmd;

		if (pud_none(pud[i]))
			continue;

		pmd = pmd_offset(&pud[i], 0);
		if (PTRS_PER_PMD > 1)
			flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
		flush |= xen_pmd_walk(mm, pmd, func,
				      last && i == nr - 1, limit);
	}
	return flush;
}

static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
		bool last, unsigned long limit)
{
	int i, nr, flush = 0;

	nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
	for (i = 0; i < nr; i++) {
		pud_t *pud;

		if (p4d_none(p4d[i]))
			continue;

		pud = pud_offset(&p4d[i], 0);
		if (PTRS_PER_PUD > 1)
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
		flush |= xen_pud_walk(mm, pud, func,
				      last && i == nr - 1, limit);
	}
	return flush;
}

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.  On 32-bit this ends
 * up being a zero-sized hole and so is a no-op.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int i, nr, flush = 0;
	unsigned hole_low, hole_high;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	nr = pgd_index(limit) + 1;
	for (i = 0; i < nr; i++) {
		p4d_t *p4d;

		if (i >= hole_low && i < hole_high)
			continue;

		if (pgd_none(pgd[i]))
			continue;

		p4d = p4d_offset(&pgd[i], 0);
		if (PTRS_PER_P4D > 1)
			flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
	}

	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}
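
/*
 * For illustration, a walker callback has this shape; the pin/unpin
 * callbacks below are the real users, and the page counter here is
 * purely hypothetical:
 *
 *	static int example_count(struct mm_struct *mm, struct page *page,
 *				 enum pt_level level)
 *	{
 *		example_pages++;
 *		return 0;
 *	}
 *
 *	xen_pgd_walk(mm, example_count, STACK_TOP_MAX);
 *
 * A zero return means the page needs no TLB flush.
 */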

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}
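
/*
 * For illustration, the pairing used by the pin/unpin code below: take
 * the lock around the RO/RW transition and defer the unlock until the
 * multicall batch completes:
 *
 *	ptl = xen_pte_lock(page, mm);
 *	if (ptl)
 *		xen_mc_callback(xen_pte_unlock, ptl);
 */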

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}
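
/*
 * For illustration, unpinning a single pagetable frame before handing
 * it back to the allocator, as the MFN-map teardown below does:
 *
 *	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
 */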

#ifdef CONFIG_X86_64
static void __init xen_cleanhighmap(unsigned long vaddr,
				    unsigned long vaddr_end)
{
	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
	 * We include the PMD passed in on _both_ boundaries. */
	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
			pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
			set_pmd(pmd, __pmd(0));
	}
	/* In case we did something silly, we should crash in this function
	 * instead of somewhere later, where it would be confusing. */
	xen_mc_flush();
}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
	void *vaddr = __va(paddr);
	void *vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
		make_lowmem_page_readwrite(vaddr);

	memblock_free(paddr, size);
}

static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;

	if (unpin)
		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
	ClearPagePinned(virt_to_page(__va(pa)));
	xen_free_ro_pages(pa, PAGE_SIZE);
}
1167
Xiong Zhang907cd432017-03-17 21:55:14 +03001168static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1169{
1170 unsigned long pa;
1171 pte_t *pte_tbl;
1172 int i;
1173
1174 if (pmd_large(*pmd)) {
1175 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1176 xen_free_ro_pages(pa, PMD_SIZE);
1177 return;
1178 }
1179
1180 pte_tbl = pte_offset_kernel(pmd, 0);
1181 for (i = 0; i < PTRS_PER_PTE; i++) {
1182 if (pte_none(pte_tbl[i]))
1183 continue;
1184 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1185 xen_free_ro_pages(pa, PAGE_SIZE);
1186 }
1187 set_pmd(pmd, __pmd(0));
1188 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1189}
1190
1191static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1192{
1193 unsigned long pa;
1194 pmd_t *pmd_tbl;
1195 int i;
1196
1197 if (pud_large(*pud)) {
1198 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1199 xen_free_ro_pages(pa, PUD_SIZE);
1200 return;
1201 }
1202
1203 pmd_tbl = pmd_offset(pud, 0);
1204 for (i = 0; i < PTRS_PER_PMD; i++) {
1205 if (pmd_none(pmd_tbl[i]))
1206 continue;
1207 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1208 }
1209 set_pud(pud, __pud(0));
1210 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1211}
1212
1213static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1214{
1215 unsigned long pa;
1216 pud_t *pud_tbl;
1217 int i;
1218
1219 if (p4d_large(*p4d)) {
1220 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1221 xen_free_ro_pages(pa, P4D_SIZE);
1222 return;
1223 }
1224
1225 pud_tbl = pud_offset(p4d, 0);
1226 for (i = 0; i < PTRS_PER_PUD; i++) {
1227 if (pud_none(pud_tbl[i]))
1228 continue;
1229 xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1230 }
1231 set_p4d(p4d, __p4d(0));
1232 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1233}
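
/*
 * The three xen_cleanmfnmap_*() helpers above share one shape
 * (pseudo-code sketch, not compilable as-is): free a huge mapping
 * outright, or walk the next-lower table; then clear the entry and
 * free the table page itself:
 *
 *	if (<level>_large(*entry))
 *		return xen_free_ro_pages(pa, <LEVEL>_SIZE);
 *	for each present lower entry
 *		recurse one level down;
 *	clear *entry;
 *	xen_cleanmfnmap_free_pgtbl(lower_table, unpin);
 */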
1234
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001235/*
1236 * Since the initial P->M table is well isolated we can (and since it is
1237 * perhaps large we should) also free the page tables mapping it.
1238 */
1239static void __init xen_cleanmfnmap(unsigned long vaddr)
1240{
Xiong Zhang907cd432017-03-17 21:55:14 +03001241 pgd_t *pgd;
1242 p4d_t *p4d;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001243 unsigned int i;
Juergen Gross70e61192015-07-17 06:51:35 +02001244 bool unpin;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001245
Juergen Gross70e61192015-07-17 06:51:35 +02001246 unpin = (vaddr == 2 * PGDIR_SIZE);
Xiong Zhang907cd432017-03-17 21:55:14 +03001247 vaddr &= PMD_MASK;
1248 pgd = pgd_offset_k(vaddr);
1249 p4d = p4d_offset(pgd, 0);
1250 for (i = 0; i < PTRS_PER_P4D; i++) {
1251 if (p4d_none(p4d[i]))
1252 continue;
1253 xen_cleanmfnmap_p4d(p4d + i, unpin);
1254 }
1255 if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
1256 set_pgd(pgd, __pgd(0));
1257 xen_cleanmfnmap_free_pgtbl(p4d, unpin);
1258 }
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001259}
1260
Juergen Gross054954e2014-11-28 11:53:58 +01001261static void __init xen_pagetable_p2m_free(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001262{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001263 unsigned long size;
1264 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001265
1266 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1267
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001268 /* No memory or already called. */
Juergen Gross054954e2014-11-28 11:53:58 +01001269 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001270 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001271
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001272 /* Use the __ka address and fill it with INVALID_P2M_ENTRY! */
1273 memset((void *)xen_start_info->mfn_list, 0xff, size);
1274
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001275 addr = xen_start_info->mfn_list;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001276 /*
1277 * We could be in __ka space.
1278 * We round up to the PMD, which means that if anybody at this stage is
1279 * using the __ka address of xen_start_info or
1280 * xen_start_info->shared_info they are going to crash. Fortunately
1281 * we have already revectored in xen_setup_kernel_pagetable and in
1282 * xen_setup_shared_info.
1283 */
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001284 size = roundup(size, PMD_SIZE);
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001285
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001286 if (addr >= __START_KERNEL_map) {
1287 xen_cleanhighmap(addr, addr + size);
1288 size = PAGE_ALIGN(xen_start_info->nr_pages *
1289 sizeof(unsigned long));
1290 memblock_free(__pa(addr), size);
1291 } else {
1292 xen_cleanmfnmap(addr);
1293 }
Juergen Gross70e61192015-07-17 06:51:35 +02001294}
1295
1296static void __init xen_pagetable_cleanhighmap(void)
1297{
1298 unsigned long size;
1299 unsigned long addr;
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001300
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001301 /* At this stage, cleanup_highmap has already cleaned __ka space
1302 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1303 * the ramdisk). We continue on, erasing PMD entries that point to page
1304 * tables - do note that they are accessible at this stage via __va.
1305 * For good measure we also round up to the PMD - which means that if
1306 * anybody is using a __ka address for the initial boot-stack - and tries
1307 * to use it - they are going to crash. The xen_start_info has been
1308 * taken care of already in xen_setup_kernel_pagetable. */
1309 addr = xen_start_info->pt_base;
1310 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1311
1312 xen_cleanhighmap(addr, addr + size);
1313 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1314#ifdef DEBUG
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08001315 /* This is superfluous and not strictly necessary, but let's do
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001316 * it anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1317 * anything at this stage. */
1318 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1319#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001320}
1321#endif
1322
Juergen Gross054954e2014-11-28 11:53:58 +01001323static void __init xen_pagetable_p2m_setup(void)
1324{
1325 if (xen_feature(XENFEAT_auto_translated_physmap))
1326 return;
1327
1328 xen_vmalloc_p2m_tree();
1329
1330#ifdef CONFIG_X86_64
1331 xen_pagetable_p2m_free();
Juergen Gross70e61192015-07-17 06:51:35 +02001332
1333 xen_pagetable_cleanhighmap();
Juergen Gross054954e2014-11-28 11:53:58 +01001334#endif
1335 /* And revector! Bye bye old array */
1336 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1337}
1338
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001339static void __init xen_pagetable_init(void)
1340{
1341 paging_init();
Juergen Grosscdfa0ba2014-12-10 16:56:03 +01001342 xen_post_allocator_init();
Juergen Gross054954e2014-11-28 11:53:58 +01001343
1344 xen_pagetable_p2m_setup();
1345
Juergen Gross2c185682014-10-14 13:33:46 +02001346 /* Allocate and initialize top and mid mfn levels for p2m structure */
1347 xen_build_mfn_list_list();
1348
Juergen Gross1f3ac862014-11-28 11:53:53 +01001349 /* Remap memory freed due to conflicts with E820 map */
1350 if (!xen_feature(XENFEAT_auto_translated_physmap))
1351 xen_remap_memory();
1352
Juergen Gross2c185682014-10-14 13:33:46 +02001353 xen_setup_shared_info();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001354}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001355static void xen_write_cr2(unsigned long cr2)
1356{
Alex Shi2113f462012-01-13 23:53:35 +08001357 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001358}
1359
1360static unsigned long xen_read_cr2(void)
1361{
Alex Shi2113f462012-01-13 23:53:35 +08001362 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001363}
1364
1365unsigned long xen_read_cr2_direct(void)
1366{
Alex Shi2113f462012-01-13 23:53:35 +08001367 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001368}
1369
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001370void xen_flush_tlb_all(void)
1371{
1372 struct mmuext_op *op;
1373 struct multicall_space mcs;
1374
1375 trace_xen_mmu_flush_tlb_all(0);
1376
1377 preempt_disable();
1378
1379 mcs = xen_mc_entry(sizeof(*op));
1380
1381 op = mcs.args;
1382 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1383 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1384
1385 xen_mc_issue(PARAVIRT_LAZY_MMU);
1386
1387 preempt_enable();
1388}
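
/*
 * The TLB helpers here all follow the same multicall pattern (sketch,
 * using the xen_mc_* batching API used throughout this file):
 *
 *	mcs = xen_mc_entry(sizeof(*op));   <- start a batch, reserve args
 *	op = mcs.args;                     <- fill in the mmuext_op
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);   <- issues immediately unless we
 *	                                      are in lazy MMU mode
 */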
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001389static void xen_flush_tlb(void)
1390{
1391 struct mmuext_op *op;
1392 struct multicall_space mcs;
1393
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001394 trace_xen_mmu_flush_tlb(0);
1395
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001396 preempt_disable();
1397
1398 mcs = xen_mc_entry(sizeof(*op));
1399
1400 op = mcs.args;
1401 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1402 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1403
1404 xen_mc_issue(PARAVIRT_LAZY_MMU);
1405
1406 preempt_enable();
1407}
1408
1409static void xen_flush_tlb_single(unsigned long addr)
1410{
1411 struct mmuext_op *op;
1412 struct multicall_space mcs;
1413
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001414 trace_xen_mmu_flush_tlb_single(addr);
1415
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001416 preempt_disable();
1417
1418 mcs = xen_mc_entry(sizeof(*op));
1419 op = mcs.args;
1420 op->cmd = MMUEXT_INVLPG_LOCAL;
1421 op->arg1.linear_addr = addr & PAGE_MASK;
1422 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1423
1424 xen_mc_issue(PARAVIRT_LAZY_MMU);
1425
1426 preempt_enable();
1427}
1428
1429static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001430 struct mm_struct *mm, unsigned long start,
1431 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001432{
1433 struct {
1434 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001435#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001436 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001437#else
1438 DECLARE_BITMAP(mask, NR_CPUS);
1439#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001440 } *args;
1441 struct multicall_space mcs;
1442
Alex Shie7b52ff2012-06-28 09:02:17 +08001443 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001444
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001445 if (cpumask_empty(cpus))
1446 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001447
1448 mcs = xen_mc_entry(sizeof(*args));
1449 args = mcs.args;
1450 args->op.arg2.vcpumask = to_cpumask(args->mask);
1451
1452 /* Remove us, and any offline CPUs. */
1453 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1454 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001455
Alex Shie7b52ff2012-06-28 09:02:17 +08001456 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001457 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001458 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001459 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001460 }
1461
1462 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1463
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001464 xen_mc_issue(PARAVIRT_LAZY_MMU);
1465}
1466
1467static unsigned long xen_read_cr3(void)
1468{
Alex Shi2113f462012-01-13 23:53:35 +08001469 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001470}
1471
1472static void set_current_cr3(void *v)
1473{
Alex Shi2113f462012-01-13 23:53:35 +08001474 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001475}
1476
1477static void __xen_write_cr3(bool kernel, unsigned long cr3)
1478{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001479 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001480 unsigned long mfn;
1481
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001482 trace_xen_mmu_write_cr3(kernel, cr3);
1483
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001484 if (cr3)
1485 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1486 else
1487 mfn = 0;
1488
1489 WARN_ON(mfn == 0 && kernel);
1490
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001491 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1492 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001493
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001494 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001495
1496 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001497 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001498
1499 /* Update xen_current_cr3 once the batch has actually
1500 been submitted. */
1501 xen_mc_callback(set_current_cr3, (void *)cr3);
1502 }
1503}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001504static void xen_write_cr3(unsigned long cr3)
1505{
1506 BUG_ON(preemptible());
1507
1508 xen_mc_batch(); /* disables interrupts */
1509
1510 /* Update while interrupts are disabled, so it's atomic with
1511 respect to IPIs */
Alex Shi2113f462012-01-13 23:53:35 +08001512 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001513
1514 __xen_write_cr3(true, cr3);
1515
1516#ifdef CONFIG_X86_64
1517 {
1518 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1519 if (user_pgd)
1520 __xen_write_cr3(false, __pa(user_pgd));
1521 else
1522 __xen_write_cr3(false, 0);
1523 }
1524#endif
1525
1526 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1527}
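
/*
 * Sketch of what the 64-bit path above amounts to (illustrative): both
 * pagetable base switches are batched hypercall operations rather than
 * real cr3 writes:
 *
 *	__xen_write_cr3(true, cr3);			(MMUEXT_NEW_BASEPTR)
 *	__xen_write_cr3(false, __pa(user_pgd));		(MMUEXT_NEW_USER_BASEPTR)
 */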
1528
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001529#ifdef CONFIG_X86_64
1530/*
1531 * At the start of the day - when Xen launches a guest, it has already
1532 * built pagetables for the guest. We diligently look over them
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08001533 * in xen_setup_kernel_pagetable and graft them as appropriate into the
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001534 * init_level4_pgt and its friends. Then when we are happy we load
1535 * the new init_level4_pgt - and continue on.
1536 *
1537 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1538 * up the rest of the pagetables. When it has completed it loads the cr3.
1539 * N.B. that baremetal would start at 'start_kernel' (and the early
1540 * #PF handler would create bootstrap pagetables) - so we are running
1541 * with the same assumptions as what to do when write_cr3 is executed
1542 * at this point.
1543 *
1544 * Since there are no user-page tables at all, we have two variants
1545 * of xen_write_cr3 - the early bootup (this one), and the late one
1546 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1547 * the Linux kernel and user-space are both in ring 3 while the
1548 * hypervisor is in ring 0.
1549 */
1550static void __init xen_write_cr3_init(unsigned long cr3)
1551{
1552 BUG_ON(preemptible());
1553
1554 xen_mc_batch(); /* disables interrupts */
1555
1556 /* Update while interrupts are disabled, so it's atomic with
1557 respect to IPIs */
1558 this_cpu_write(xen_cr3, cr3);
1559
1560 __xen_write_cr3(true, cr3);
1561
1562 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001563}
1564#endif
1565
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001566static int xen_pgd_alloc(struct mm_struct *mm)
1567{
1568 pgd_t *pgd = mm->pgd;
1569 int ret = 0;
1570
1571 BUG_ON(PagePinned(virt_to_page(pgd)));
1572
1573#ifdef CONFIG_X86_64
1574 {
1575 struct page *page = virt_to_page(pgd);
1576 pgd_t *user_pgd;
1577
1578 BUG_ON(page->private != 0);
1579
1580 ret = -ENOMEM;
1581
1582 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1583 page->private = (unsigned long)user_pgd;
1584
1585 if (user_pgd != NULL) {
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001586#ifdef CONFIG_X86_VSYSCALL_EMULATION
Andy Lutomirskif40c3302014-05-05 12:19:36 -07001587 user_pgd[pgd_index(VSYSCALL_ADDR)] =
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001588 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001589#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001590 ret = 0;
1591 }
1592
1593 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1594 }
1595#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001596 return ret;
1597}
1598
1599static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1600{
1601#ifdef CONFIG_X86_64
1602 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1603
1604 if (user_pgd)
1605 free_page((unsigned long)user_pgd);
1606#endif
1607}
1608
David Vrabeld095d432012-07-09 11:39:05 +01001609/*
1610 * Init-time set_pte while constructing initial pagetables, which
1611 * doesn't allow RO page table pages to be remapped RW.
1612 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001613 * If there is no MFN for this PFN then this page is initially
1614 * ballooned out so clear the PTE (as in decrease_reservation() in
1615 * drivers/xen/balloon.c).
1616 *
David Vrabeld095d432012-07-09 11:39:05 +01001617 * Many of these PTE updates are done on unpinned and writable pages
1618 * and doing a hypercall for these is unnecessary and expensive. At
1619 * this point it is not possible to tell if a page is pinned or not,
1620 * so always write the PTE directly and rely on Xen trapping and
1621 * emulating any updates as necessary.
1622 */
David Vrabeld6b186c2016-05-17 15:54:50 +01001623__visible pte_t xen_make_pte_init(pteval_t pte)
1624{
1625#ifdef CONFIG_X86_64
1626 unsigned long pfn;
1627
1628 /*
1629 * Pages belonging to the initial p2m list mapped outside the default
1630 * address range must be mapped read-only. This region contains the
1631 * page tables for mapping the p2m list, too, and page tables MUST be
1632 * mapped read-only.
1633 */
1634 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1635 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1636 pfn >= xen_start_info->first_p2m_pfn &&
1637 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1638 pte &= ~_PAGE_RW;
1639#endif
1640 pte = pte_pfn_to_mfn(pte);
1641 return native_make_pte(pte);
1642}
1643PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
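
/*
 * Simplified sketch of the pfn->mfn conversion performed by
 * pte_pfn_to_mfn() above (the real helper also has to cope with
 * identity and foreign mappings, so treat this as an approximation):
 *
 *	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
 *	mfn = pfn_to_mfn(pfn);
 *	pte = (pte & ~PTE_PFN_MASK) | ((pteval_t)mfn << PAGE_SHIFT);
 */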
1644
Daniel Kiper3f5089532011-05-12 17:19:53 -04001645static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001646{
David Vrabeld6b186c2016-05-17 15:54:50 +01001647#ifdef CONFIG_X86_32
1648 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1649 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1650 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1651 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1652 pte_val_ma(pte));
1653#endif
David Vrabeld095d432012-07-09 11:39:05 +01001654 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001655}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001656
1657/* Early in boot, while setting up the initial pagetable, assume
1658 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001659static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001660{
1661#ifdef CONFIG_FLATMEM
1662 BUG_ON(mem_map); /* should only be used early */
1663#endif
1664 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001665 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1666}
1667
1668/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001669static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001670{
1671#ifdef CONFIG_FLATMEM
1672 BUG_ON(mem_map); /* should only be used early */
1673#endif
1674 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001675}
1676
1677/* Early release_pte assumes that all pts are pinned, since there's
1678 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001679static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001680{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001681 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001682 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1683}
1684
Daniel Kiper3f5089532011-05-12 17:19:53 -04001685static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001686{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001687 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001688}
1689
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001690static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1691{
1692 struct multicall_space mcs;
1693 struct mmuext_op *op;
1694
1695 mcs = __xen_mc_entry(sizeof(*op));
1696 op = mcs.args;
1697 op->cmd = cmd;
1698 op->arg1.mfn = pfn_to_mfn(pfn);
1699
1700 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1701}
1702
1703static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1704{
1705 struct multicall_space mcs;
1706 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1707
1708 mcs = __xen_mc_entry(0);
1709 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1710 pfn_pte(pfn, prot), 0);
1711}
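
/*
 * Combined usage sketch (illustrative): xen_alloc_ptpage() below
 * batches both operations when a new pte page joins a pinned
 * pagetable:
 *
 *	xen_mc_batch();
 *	__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 *	__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */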
1712
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001713/* This needs to make sure the new pte page is pinned iff it's being
1714 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001715static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1716 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001717{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001718 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001719
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001720 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001721
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001722 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001723 struct page *page = pfn_to_page(pfn);
1724
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001725 SetPagePinned(page);
1726
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001727 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001728 xen_mc_batch();
1729
1730 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1731
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001732 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001733 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1734
1735 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001736 } else {
1737 /* make sure there are no stray mappings of
1738 this page */
1739 kmap_flush_unused();
1740 }
1741 }
1742}
1743
1744static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1745{
1746 xen_alloc_ptpage(mm, pfn, PT_PTE);
1747}
1748
1749static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1750{
1751 xen_alloc_ptpage(mm, pfn, PT_PMD);
1752}
1753
1754/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001755static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001756{
1757 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001758 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001759
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001760 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1761
1762 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001763 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001764 xen_mc_batch();
1765
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001766 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001767 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1768
1769 __set_pfn_prot(pfn, PAGE_KERNEL);
1770
1771 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001772 }
1773 ClearPagePinned(page);
1774 }
1775}
1776
1777static void xen_release_pte(unsigned long pfn)
1778{
1779 xen_release_ptpage(pfn, PT_PTE);
1780}
1781
1782static void xen_release_pmd(unsigned long pfn)
1783{
1784 xen_release_ptpage(pfn, PT_PMD);
1785}
1786
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03001787#if CONFIG_PGTABLE_LEVELS >= 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001788static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1789{
1790 xen_alloc_ptpage(mm, pfn, PT_PUD);
1791}
1792
1793static void xen_release_pud(unsigned long pfn)
1794{
1795 xen_release_ptpage(pfn, PT_PUD);
1796}
1797#endif
1798
1799void __init xen_reserve_top(void)
1800{
1801#ifdef CONFIG_X86_32
1802 unsigned long top = HYPERVISOR_VIRT_START;
1803 struct xen_platform_parameters pp;
1804
1805 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1806 top = pp.virt_start;
1807
1808 reserve_top_address(-top);
1809#endif /* CONFIG_X86_32 */
1810}
1811
1812/*
1813 * Like __va(), but returns the address in the kernel mapping (which is
1814 * all we have until the physical memory mapping has been set up).
1815 */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001816static void * __init __ka(phys_addr_t paddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001817{
1818#ifdef CONFIG_X86_64
1819 return (void *)(paddr + __START_KERNEL_map);
1820#else
1821 return __va(paddr);
1822#endif
1823}
1824
1825/* Convert a machine address to physical address */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001826static unsigned long __init m2p(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001827{
1828 phys_addr_t paddr;
1829
1830 maddr &= PTE_PFN_MASK;
1831 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1832
1833 return paddr;
1834}
1835
1836/* Convert a machine address to kernel virtual */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001837static void * __init m2v(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001838{
1839 return __ka(m2p(maddr));
1840}
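
/*
 * Worked example (as used in xen_map_identity_early() below): a pmd
 * entry holds a machine address, so m2v() recovers the kernel-mapping
 * virtual address of the pte page it references:
 *
 *	pte_page = m2v(pmd[pmdidx].pmd);
 */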
1841
Juan Quintela4ec53872010-09-02 15:45:43 +01001842/* Set the page permissions on identity-mapped pages */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001843static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1844 unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001845{
1846 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1847 pte_t pte = pfn_pte(pfn, prot);
1848
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001849 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001850 BUG();
1851}
Juergen Grossbf9d8342015-01-28 07:44:24 +01001852static void __init set_page_prot(void *addr, pgprot_t prot)
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001853{
1854 return set_page_prot_flags(addr, prot, UVMF_NONE);
1855}
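
/*
 * Typical use (illustrative): pagetable pages are flipped read-only
 * before being handed to Xen, e.g. in xen_setup_kernel_pagetable():
 *
 *	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 */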
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001856#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001857static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001858{
1859 unsigned pmdidx, pteidx;
1860 unsigned ident_pte;
1861 unsigned long pfn;
1862
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001863 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1864 PAGE_SIZE);
1865
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001866 ident_pte = 0;
1867 pfn = 0;
1868 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1869 pte_t *pte_page;
1870
1871 /* Reuse or allocate a page of ptes */
1872 if (pmd_present(pmd[pmdidx]))
1873 pte_page = m2v(pmd[pmdidx].pmd);
1874 else {
1875 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001876 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001877 break;
1878
1879 pte_page = &level1_ident_pgt[ident_pte];
1880 ident_pte += PTRS_PER_PTE;
1881
1882 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1883 }
1884
1885 /* Install mappings */
1886 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1887 pte_t pte;
1888
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001889 if (pfn > max_pfn_mapped)
1890 max_pfn_mapped = pfn;
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001891
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001892 if (!pte_none(pte_page[pteidx]))
1893 continue;
1894
1895 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1896 pte_page[pteidx] = pte;
1897 }
1898 }
1899
1900 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1901 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1902
1903 set_page_prot(pmd, PAGE_KERNEL_RO);
1904}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001905#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001906void __init xen_setup_machphys_mapping(void)
1907{
1908 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001909
1910 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1911 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001912 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001913 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001914 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001915 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001916#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001917 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1918 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001919#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001920}
1921
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001922#ifdef CONFIG_X86_64
Juergen Grossbf9d8342015-01-28 07:44:24 +01001923static void __init convert_pfn_mfn(void *v)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001924{
1925 pte_t *pte = v;
1926 int i;
1927
1928 /* All levels are converted the same way, so just treat them
1929 as ptes. */
1930 for (i = 0; i < PTRS_PER_PTE; i++)
1931 pte[i] = xen_make_pte(pte[i].pte);
1932}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001933static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1934 unsigned long addr)
1935{
1936 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001937 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001938 clear_page((void *)addr);
1939 (*pt_base)++;
1940 }
1941 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001942 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001943 clear_page((void *)addr);
1944 (*pt_end)--;
1945 }
1946}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001947/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001948 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001949 *
1950 * We can construct this by grafting the Xen-provided pagetable into
1951 * head_64.S's preconstructed pagetables. We copy the Xen L2s into
Stefan Bader0b5a5062014-09-02 11:16:01 +01001952 * level2_ident_pgt and level2_kernel_pgt. This means that only the
1953 * kernel has a physical mapping to start with - but that's enough to
1954 * get __va working. We need to fill in the rest of the physical
Boris Ostrovsky063334f2017-02-03 16:57:22 -05001955 * mapping once some sort of allocator has been set up.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001956 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001957void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001958{
1959 pud_t *l3;
1960 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001961 unsigned long addr[3];
1962 unsigned long pt_base, pt_end;
1963 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001964
Stefano Stabellini14988a42011-02-18 11:32:40 +00001965 /* max_pfn_mapped is the last pfn mapped in the initial memory
1966 * mappings. Considering that on Xen after the kernel mappings we
1967 * have the mappings of some pages that don't exist in pfn space, we
1968 * set max_pfn_mapped to the last real pfn mapped. */
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001969 if (xen_start_info->mfn_list < __START_KERNEL_map)
1970 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1971 else
1972 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
Stefano Stabellini14988a42011-02-18 11:32:40 +00001973
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001974 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1975 pt_end = pt_base + xen_start_info->nr_pt_frames;
1976
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001977 /* Zap identity mapping */
1978 init_level4_pgt[0] = __pgd(0);
1979
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001980 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1981 /* Pre-constructed entries are in pfn, so convert to mfn */
1982 /* L4[272] -> level3_ident_pgt
1983 * L4[511] -> level3_kernel_pgt */
1984 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001985
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001986 /* L3_i[0] -> level2_ident_pgt */
1987 convert_pfn_mfn(level3_ident_pgt);
1988 /* L3_k[510] -> level2_kernel_pgt
Stefan Bader0b5a5062014-09-02 11:16:01 +01001989 * L3_k[511] -> level2_fixmap_pgt */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001990 convert_pfn_mfn(level3_kernel_pgt);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001991
1992 /* L3_k[511][506] -> level1_fixmap_pgt */
1993 convert_pfn_mfn(level2_fixmap_pgt);
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001994 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001995 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001996 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1997 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1998
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001999 addr[0] = (unsigned long)pgd;
2000 addr[1] = (unsigned long)l3;
2001 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04002002 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
Stefan Bader0b5a5062014-09-02 11:16:01 +01002003 * Both L4[272][0] and L4[511][510] have entries that point to the same
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04002004 * L2 (PMD) tables. Meaning that if you modify it in __va space
2005 * it will also be modified in the __ka space! (But if you just
2006 * modify the PMD table to point to other PTEs or none, then you
2007 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002008 copy_page(level2_ident_pgt, l2);
Stefan Bader0b5a5062014-09-02 11:16:01 +01002009 /* Graft it onto L4[511][510] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002010 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002011
Juergen Gross8f5b0c62015-07-17 06:51:25 +02002012 /* Copy the initial P->M table mappings if necessary. */
2013 i = pgd_index(xen_start_info->mfn_list);
2014 if (i && i < pgd_index(__START_KERNEL_map))
2015 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
2016
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002017 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
2018 /* Make pagetable pieces RO */
2019 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
2020 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
2021 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
2022 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
2023 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
2024 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
2025 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Stefan Bader0b5a5062014-09-02 11:16:01 +01002026 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002027
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002028 /* Pin down new L4 */
2029 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
2030 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002031
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002032 /* Unpin Xen-provided one */
2033 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002034
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002035 /*
2036 * At this stage there can be no user pgd, and no page
2037 * structure to attach it to, so make sure we just set kernel
2038 * pgd.
2039 */
2040 xen_mc_batch();
2041 __xen_write_cr3(true, __pa(init_level4_pgt));
2042 xen_mc_issue(PARAVIRT_LAZY_CPU);
2043 } else
2044 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002045
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04002046 /* We can't rip out L3 and L2 that easily, as the Xen pagetables are
2047 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
2048 * the initial domain. For guests using the toolstack, they are in
2049 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
2050 * rip out the [L4] (pgd), but for guests we shave off three pages.
2051 */
2052 for (i = 0; i < ARRAY_SIZE(addr); i++)
2053 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002054
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04002055 /* The Xen pagetable we are now using, smaller by up to three pages */
Juergen Gross04414ba2015-07-17 06:51:31 +02002056 xen_pt_base = PFN_PHYS(pt_base);
2057 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
2058 memblock_reserve(xen_pt_base, xen_pt_size);
Juergen Gross70e61192015-07-17 06:51:35 +02002059
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04002060 /* Revector the xen_start_info */
2061 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002062}
Juergen Gross70e61192015-07-17 06:51:35 +02002063
2064/*
2065 * Read a value from a physical address.
2066 */
2067static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
2068{
2069 unsigned long *vaddr;
2070 unsigned long val;
2071
2072 vaddr = early_memremap_ro(addr, sizeof(val));
2073 val = *vaddr;
2074 early_memunmap(vaddr, sizeof(val));
2075 return val;
2076}
2077
2078/*
2079 * Translate a virtual address to a physical one without relying on mapped
2080 * page tables.
2081 */
2082static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2083{
2084 phys_addr_t pa;
2085 pgd_t pgd;
2086 pud_t pud;
2087 pmd_t pmd;
2088 pte_t pte;
2089
2090 pa = read_cr3();
2091 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2092 sizeof(pgd)));
2093 if (!pgd_present(pgd))
2094 return 0;
2095
2096 pa = pgd_val(pgd) & PTE_PFN_MASK;
2097 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2098 sizeof(pud)));
2099 if (!pud_present(pud))
2100 return 0;
2101 pa = pud_pfn(pud) << PAGE_SHIFT;
2102 if (pud_large(pud))
2103 return pa + (vaddr & ~PUD_MASK);
2104
2105 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2106 sizeof(pmd)));
2107 if (!pmd_present(pmd))
2108 return 0;
2109 pa = pmd_pfn(pmd) << PAGE_SHIFT;
2110 if (pmd_large(pmd))
2111 return pa + (vaddr & ~PMD_MASK);
2112
2113 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2114 sizeof(pte)));
2115 if (!pte_present(pte))
2116 return 0;
2117 pa = pte_pfn(pte) << PAGE_SHIFT;
2118
2119 return pa | (vaddr & ~PAGE_MASK);
2120}
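
/*
 * Usage sketch (illustrative): xen_relocate_p2m() below uses this walk
 * to locate the old p2m list before the new mappings exist:
 *
 *	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
 */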
2121
2122/*
2123 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
2124 * this area.
2125 */
2126void __init xen_relocate_p2m(void)
2127{
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002128 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
Juergen Gross70e61192015-07-17 06:51:35 +02002129 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002130 int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
Juergen Gross70e61192015-07-17 06:51:35 +02002131 pte_t *pt;
2132 pmd_t *pmd;
2133 pud_t *pud;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002134 p4d_t *p4d = NULL;
Juergen Gross70e61192015-07-17 06:51:35 +02002135 pgd_t *pgd;
2136 unsigned long *new_p2m;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002137 int save_pud;
Juergen Gross70e61192015-07-17 06:51:35 +02002138
2139 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2140 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2141 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2142 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002143 n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
2144 if (PTRS_PER_P4D > 1)
2145 n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
2146 else
2147 n_p4d = 0;
2148 n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
Juergen Gross70e61192015-07-17 06:51:35 +02002149
2150 new_area = xen_find_free_area(PFN_PHYS(n_frames));
2151 if (!new_area) {
2152 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2153 BUG();
2154 }
2155
2156 /*
2157 * Set up the page tables for addressing the new p2m list.
2158 * We have asked the hypervisor to map the p2m list at the user address
2159 * PUD_SIZE. It may have done so, or it may have used a kernel space
2160 * address depending on the Xen version.
2161 * To avoid any possible virtual address collision, just use
2162 * 2 * PUD_SIZE for the new area.
2163 */
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002164 p4d_phys = new_area;
2165 pud_phys = p4d_phys + PFN_PHYS(n_p4d);
Juergen Gross70e61192015-07-17 06:51:35 +02002166 pmd_phys = pud_phys + PFN_PHYS(n_pud);
2167 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2168 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2169
2170 pgd = __va(read_cr3());
2171 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002172 idx_p4d = 0;
2173 save_pud = n_pud;
2174 do {
2175 if (n_p4d > 0) {
2176 p4d = early_memremap(p4d_phys, PAGE_SIZE);
2177 clear_page(p4d);
2178 n_pud = min(save_pud, PTRS_PER_P4D);
Juergen Gross70e61192015-07-17 06:51:35 +02002179 }
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002180 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2181 pud = early_memremap(pud_phys, PAGE_SIZE);
2182 clear_page(pud);
2183 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2184 idx_pmd++) {
2185 pmd = early_memremap(pmd_phys, PAGE_SIZE);
2186 clear_page(pmd);
2187 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2188 idx_pt++) {
2189 pt = early_memremap(pt_phys, PAGE_SIZE);
2190 clear_page(pt);
2191 for (idx_pte = 0;
2192 idx_pte < min(n_pte, PTRS_PER_PTE);
2193 idx_pte++) {
2194 set_pte(pt + idx_pte,
2195 pfn_pte(p2m_pfn, PAGE_KERNEL));
2196 p2m_pfn++;
2197 }
2198 n_pte -= PTRS_PER_PTE;
2199 early_memunmap(pt, PAGE_SIZE);
2200 make_lowmem_page_readonly(__va(pt_phys));
2201 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2202 PFN_DOWN(pt_phys));
2203 set_pmd(pmd + idx_pt,
2204 __pmd(_PAGE_TABLE | pt_phys));
2205 pt_phys += PAGE_SIZE;
2206 }
2207 n_pt -= PTRS_PER_PMD;
2208 early_memunmap(pmd, PAGE_SIZE);
2209 make_lowmem_page_readonly(__va(pmd_phys));
2210 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2211 PFN_DOWN(pmd_phys));
2212 set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2213 pmd_phys += PAGE_SIZE;
2214 }
2215 n_pmd -= PTRS_PER_PUD;
2216 early_memunmap(pud, PAGE_SIZE);
2217 make_lowmem_page_readonly(__va(pud_phys));
2218 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2219 if (n_p4d > 0)
2220 set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
2221 else
2222 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2223 pud_phys += PAGE_SIZE;
2224 }
2225 if (n_p4d > 0) {
2226 save_pud -= PTRS_PER_P4D;
2227 early_memunmap(p4d, PAGE_SIZE);
2228 make_lowmem_page_readonly(__va(p4d_phys));
2229 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
2230 set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
2231 p4d_phys += PAGE_SIZE;
2232 }
2233 } while (++idx_p4d < n_p4d);
Juergen Gross70e61192015-07-17 06:51:35 +02002234
2235 /* Now copy the old p2m info to the new area. */
2236 memcpy(new_p2m, xen_p2m_addr, size);
2237 xen_p2m_addr = new_p2m;
2238
2239 /* Release the old p2m list and set new list info. */
2240 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2241 BUG_ON(!p2m_pfn);
2242 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2243
2244 if (xen_start_info->mfn_list < __START_KERNEL_map) {
2245 pfn = xen_start_info->first_p2m_pfn;
2246 pfn_end = xen_start_info->first_p2m_pfn +
2247 xen_start_info->nr_p2m_frames;
2248 set_pgd(pgd + 1, __pgd(0));
2249 } else {
2250 pfn = p2m_pfn;
2251 pfn_end = p2m_pfn_end;
2252 }
2253
2254 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2255 while (pfn < pfn_end) {
2256 if (pfn == p2m_pfn) {
2257 pfn = p2m_pfn_end;
2258 continue;
2259 }
2260 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2261 pfn++;
2262 }
2263
2264 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2265 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2266 xen_start_info->nr_p2m_frames = n_frames;
2267}
2268
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002269#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002270static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2271static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2272
Daniel Kiper3f5089532011-05-12 17:19:53 -04002273static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002274{
2275 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2276
2277 BUG_ON(read_cr3() != __pa(initial_page_table));
2278 BUG_ON(cr3 != __pa(swapper_pg_dir));
2279
2280 /*
2281 * We are switching to swapper_pg_dir for the first time (from
2282 * initial_page_table) and therefore need to mark that page
2283 * read-only and then pin it.
2284 *
2285 * Xen disallows sharing of kernel PMDs for PAE
2286 * guests. Therefore we must copy the kernel PMD from
2287 * initial_page_table into a new kernel PMD to be used in
2288 * swapper_pg_dir.
2289 */
2290 swapper_kernel_pmd =
2291 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002292 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002293 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2294 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2295 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2296
2297 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2298 xen_write_cr3(cr3);
2299 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2300
2301 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2302 PFN_DOWN(__pa(initial_page_table)));
2303 set_page_prot(initial_page_table, PAGE_KERNEL);
2304 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2305
2306 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2307}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002308
Juergen Gross70e61192015-07-17 06:51:35 +02002309/*
2310 * For 32 bit domains xen_start_info->pt_base is the pgd address, which might
2311 * not be the first page table in the page table pool.
2312 * Iterate through the initial page tables to find the real page table base.
2313 */
2314static phys_addr_t xen_find_pt_base(pmd_t *pmd)
2315{
2316 phys_addr_t pt_base, paddr;
2317 unsigned pmdidx;
2318
2319 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2320
2321 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2322 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2323 paddr = m2p(pmd[pmdidx].pmd);
2324 pt_base = min(pt_base, paddr);
2325 }
2326
2327 return pt_base;
2328}
2329
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04002330void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002331{
2332 pmd_t *kernel_pmd;
2333
Juergen Gross70e61192015-07-17 06:51:35 +02002334 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2335
2336 xen_pt_base = xen_find_pt_base(kernel_pmd);
2337 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2338
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002339 initial_kernel_pmd =
2340 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002341
Juergen Gross70e61192015-07-17 06:51:35 +02002342 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002343
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002344 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002345
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002346 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002347
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002348 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002349 initial_page_table[KERNEL_PGD_BOUNDARY] =
2350 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002351
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002352 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2353 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002354 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2355
2356 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2357
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002358 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2359 PFN_DOWN(__pa(initial_page_table)));
2360 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002361
Juergen Gross04414ba2015-07-17 06:51:31 +02002362 memblock_reserve(xen_pt_base, xen_pt_size);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002363}
2364#endif /* CONFIG_X86_64 */
2365
Juergen Gross6c2681c2015-07-17 06:51:34 +02002366void __init xen_reserve_special_pages(void)
2367{
2368 phys_addr_t paddr;
2369
2370 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2371 if (xen_start_info->store_mfn) {
2372 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2373 memblock_reserve(paddr, PAGE_SIZE);
2374 }
2375 if (!xen_initial_domain()) {
2376 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2377 memblock_reserve(paddr, PAGE_SIZE);
2378 }
2379}
2380
Juergen Gross04414ba2015-07-17 06:51:31 +02002381void __init xen_pt_check_e820(void)
2382{
2383 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2384 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2385 BUG();
2386 }
2387}
2388
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002389static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2390
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002391static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002392{
2393 pte_t pte;
2394
2395 phys >>= PAGE_SHIFT;
2396
2397 switch (idx) {
2398 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002399 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002400#ifdef CONFIG_X86_32
2401 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002402# ifdef CONFIG_HIGHMEM
2403 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2404# endif
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002405#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002406 case VSYSCALL_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002407#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002408 case FIX_TEXT_POKE0:
2409 case FIX_TEXT_POKE1:
Thomas Garnier69218e42017-03-14 10:05:07 -07002410 case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002411 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002412 pte = pfn_pte(phys, prot);
2413 break;
2414
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002415#ifdef CONFIG_X86_LOCAL_APIC
2416 case FIX_APIC_BASE: /* maps dummy local APIC */
2417 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2418 break;
2419#endif
2420
2421#ifdef CONFIG_X86_IO_APIC
2422 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2423 /*
2424 * We just don't map the IO APIC - all access is via
2425 * hypercalls. Keep the address in the pte for reference.
2426 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002427 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002428 break;
2429#endif
2430
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002431 case FIX_PARAVIRT_BOOTMAP:
2432 /* This is an MFN, but it isn't an IO mapping from the
2433 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002434 pte = mfn_pte(phys, prot);
2435 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002436
2437 default:
2438 /* By default, set_fixmap is used for hardware mappings */
David Vrabel7f2f8822014-01-08 14:01:01 +00002439 pte = mfn_pte(phys, prot);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002440 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002441 }
2442
2443 __native_set_fixmap(idx, pte);
2444
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002445#ifdef CONFIG_X86_VSYSCALL_EMULATION
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002446 /* Replicate changes to map the vsyscall page into the user
2447 pagetable vsyscall mapping. */
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002448 if (idx == VSYSCALL_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002449 unsigned long vaddr = __fix_to_virt(idx);
2450 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2451 }
2452#endif
2453}
2454
Daniel Kiper3f5089532011-05-12 17:19:53 -04002455static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002456{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002457 if (xen_feature(XENFEAT_auto_translated_physmap))
2458 return;
2459
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002460 pv_mmu_ops.set_pte = xen_set_pte;
2461 pv_mmu_ops.set_pmd = xen_set_pmd;
2462 pv_mmu_ops.set_pud = xen_set_pud;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002463#if CONFIG_PGTABLE_LEVELS >= 4
2464 pv_mmu_ops.set_p4d = xen_set_p4d;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002465#endif
2466
2467 /* This will work as long as patching hasn't happened yet
2468 (which it hasn't) */
2469 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2470 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2471 pv_mmu_ops.release_pte = xen_release_pte;
2472 pv_mmu_ops.release_pmd = xen_release_pmd;
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002473#if CONFIG_PGTABLE_LEVELS >= 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002474 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2475 pv_mmu_ops.release_pud = xen_release_pud;
2476#endif
David Vrabeld6b186c2016-05-17 15:54:50 +01002477 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002478
2479#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002480 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002481 SetPagePinned(virt_to_page(level3_user_vsyscall));
2482#endif
2483 xen_mark_init_mm_pinned();
2484}
2485
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002486static void xen_leave_lazy_mmu(void)
2487{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002488 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002489 xen_mc_flush();
2490 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002491 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002492}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002493
Daniel Kiper3f5089532011-05-12 17:19:53 -04002494static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002495 .read_cr2 = xen_read_cr2,
2496 .write_cr2 = xen_write_cr2,
2497
2498 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002499 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002500
2501 .flush_tlb_user = xen_flush_tlb,
2502 .flush_tlb_kernel = xen_flush_tlb,
2503 .flush_tlb_single = xen_flush_tlb_single,
2504 .flush_tlb_others = xen_flush_tlb_others,
2505
2506 .pte_update = paravirt_nop,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002507
2508 .pgd_alloc = xen_pgd_alloc,
2509 .pgd_free = xen_pgd_free,
2510
2511 .alloc_pte = xen_alloc_pte_init,
2512 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002513 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002514 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002515
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002516 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002517 .set_pte_at = xen_set_pte_at,
2518 .set_pmd = xen_set_pmd_hyper,
2519
2520 .ptep_modify_prot_start = __ptep_modify_prot_start,
2521 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2522
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002523 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2524 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002525
David Vrabeld6b186c2016-05-17 15:54:50 +01002526 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002527 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002528
2529#ifdef CONFIG_X86_PAE
2530 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002531 .pte_clear = xen_pte_clear,
2532 .pmd_clear = xen_pmd_clear,
2533#endif /* CONFIG_X86_PAE */
2534 .set_pud = xen_set_pud_hyper,
2535
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002536 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2537 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002538
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002539#if CONFIG_PGTABLE_LEVELS >= 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002540 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2541 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Kirill A. Shutemovf2a6a702017-03-17 21:55:15 +03002542 .set_p4d = xen_set_p4d_hyper,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002543
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002544 .alloc_pud = xen_alloc_pmd_init,
2545 .release_pud = xen_release_pmd_init,
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002546#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002547
2548 .activate_mm = xen_activate_mm,
2549 .dup_mmap = xen_dup_mmap,
2550 .exit_mmap = xen_exit_mmap,
2551
2552 .lazy_mode = {
2553 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002554 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002555 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002556 },
2557
2558 .set_fixmap = xen_set_fixmap,
2559};
2560
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002561void __init xen_init_mmu_ops(void)
2562{
Attilio Rao7737b212012-08-21 21:22:38 +01002563 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002564
Boris Ostrovsky20f36e02015-12-12 19:25:55 -05002565 if (xen_feature(XENFEAT_auto_translated_physmap))
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002566 return;
Boris Ostrovsky20f36e02015-12-12 19:25:55 -05002567
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002568 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002569
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002570 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002571}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002572
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002573/* Protected by xen_reservation_lock. */
2574#define MAX_CONTIG_ORDER 9 /* 2MB */
2575static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2576
2577#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2578static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2579 unsigned long *in_frames,
2580 unsigned long *out_frames)
2581{
2582 int i;
2583 struct multicall_space mcs;
2584
2585 xen_mc_batch();
2586 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2587 mcs = __xen_mc_entry(0);
2588
2589 if (in_frames)
2590 in_frames[i] = virt_to_mfn(vaddr);
2591
2592 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002593 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002594
2595 if (out_frames)
2596 out_frames[i] = virt_to_pfn(vaddr);
2597 }
2598 xen_mc_issue(0);
2599}
2600
2601/*
2602 * Update the pfn-to-mfn mappings for a virtual address range, either to
2603 * point to an array of mfns, or contiguously from a single starting
2604 * mfn.
2605 */
2606static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2607 unsigned long *mfns,
2608 unsigned long first_mfn)
2609{
2610 unsigned i, limit;
2611 unsigned long mfn;
2612
2613 xen_mc_batch();
2614
2615 limit = 1u << order;
2616 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2617 struct multicall_space mcs;
2618 unsigned flags;
2619
2620 mcs = __xen_mc_entry(0);
2621 if (mfns)
2622 mfn = mfns[i];
2623 else
2624 mfn = first_mfn + i;
2625
2626 if (i < (limit - 1))
2627 flags = 0;
2628 else {
2629 if (order == 0)
2630 flags = UVMF_INVLPG | UVMF_ALL;
2631 else
2632 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2633 }
2634
2635 MULTI_update_va_mapping(mcs.mc, vaddr,
2636 mfn_pte(mfn, PAGE_KERNEL), flags);
2637
2638 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2639 }
2640
2641 xen_mc_issue(0);
2642}
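/*
 * For reference, the two call shapes used by the callers below: after a
 * successful exchange the new extent is mapped contiguously from its
 * first mfn,
 *	xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
 * while on failure the original scattered frames are put back,
 *	xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
 */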
2643
2644/*
2645 * Perform the hypercall to exchange a region of our pfns to point to
2646 * memory with the required contiguous alignment. Takes the pfns as
2647 * input, and populates mfns as output.
2648 *
 2649 * Returns 1 if the hypervisor was able to satisfy the whole request,
 2650 * 0 otherwise.
2651 */
2652static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2653 unsigned long *pfns_in,
2654 unsigned long extents_out,
2655 unsigned int order_out,
2656 unsigned long *mfns_out,
2657 unsigned int address_bits)
2658{
2659 long rc;
2660 int success;
2661
2662 struct xen_memory_exchange exchange = {
2663 .in = {
2664 .nr_extents = extents_in,
2665 .extent_order = order_in,
2666 .extent_start = pfns_in,
2667 .domid = DOMID_SELF
2668 },
2669 .out = {
2670 .nr_extents = extents_out,
2671 .extent_order = order_out,
2672 .extent_start = mfns_out,
2673 .address_bits = address_bits,
2674 .domid = DOMID_SELF
2675 }
2676 };
2677
2678 BUG_ON(extents_in << order_in != extents_out << order_out);
2679
2680 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2681 success = (exchange.nr_exchanged == extents_in);
2682
2683 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2684 BUG_ON(success && (rc != 0));
2685
2686 return success;
2687}
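/*
 * Worked example (illustrative): with MAX_CONTIG_ORDER 9, making a 2MB
 * region contiguous passes extents_in = 512, order_in = 0 and
 * extents_out = 1, order_out = 9.  The BUG_ON invariant in
 * xen_exchange_memory() holds since 512 << 0 == 1 << 9, and 512
 * scattered 4kB frames are traded for one machine-contiguous 2MB extent.
 */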
2688
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002689int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002690 unsigned int address_bits,
2691 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002692{
2693 unsigned long *in_frames = discontig_frames, out_frame;
2694 unsigned long flags;
2695 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002696 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002697
2698 /*
2699 * Currently an auto-translated guest will not perform I/O, nor will
2700 * it require PAE page directories below 4GB. Therefore any calls to
2701 * this function are redundant and can be ignored.
2702 */
2703
2704 if (xen_feature(XENFEAT_auto_translated_physmap))
2705 return 0;
2706
2707 if (unlikely(order > MAX_CONTIG_ORDER))
2708 return -ENOMEM;
2709
2710 memset((void *) vstart, 0, PAGE_SIZE << order);
2711
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002712 spin_lock_irqsave(&xen_reservation_lock, flags);
2713
2714 /* 1. Zap current PTEs, remembering MFNs. */
2715 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2716
2717 /* 2. Get a new contiguous memory extent. */
2718 out_frame = virt_to_pfn(vstart);
2719 success = xen_exchange_memory(1UL << order, 0, in_frames,
2720 1, order, &out_frame,
2721 address_bits);
2722
2723 /* 3. Map the new extent in place of old pages. */
2724 if (success)
2725 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2726 else
2727 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2728
2729 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2730
Stefano Stabellini69908902013-10-09 16:56:32 +00002731 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002732 return success ? 0 : -ENOMEM;
2733}
2734EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
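/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * that needs a machine-contiguous buffer addressable with 32 bits,
 * e.g. for a legacy DMA device.  The function and variable names are
 * hypothetical; swiotlb-xen does something similar for its bounce
 * buffers.
 */
#if 0
static void *example_alloc_dma32(unsigned int order, dma_addr_t *dma_handle)
{
	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);

	if (!buf)
		return NULL;

	/* Exchange the backing frames for a contiguous extent below 4GB. */
	if (xen_create_contiguous_region(virt_to_phys(buf), order,
					 32, dma_handle)) {
		free_pages((unsigned long)buf, order);
		return NULL;
	}
	return buf;	/* *dma_handle now holds the machine address */
}
#endif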
2735
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002736void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002737{
2738 unsigned long *out_frames = discontig_frames, in_frame;
2739 unsigned long flags;
2740 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002741 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002742
2743 if (xen_feature(XENFEAT_auto_translated_physmap))
2744 return;
2745
2746 if (unlikely(order > MAX_CONTIG_ORDER))
2747 return;
2748
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002749 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002750 memset((void *) vstart, 0, PAGE_SIZE << order);
2751
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002752 spin_lock_irqsave(&xen_reservation_lock, flags);
2753
2754 /* 1. Find start MFN of contiguous extent. */
2755 in_frame = virt_to_mfn(vstart);
2756
2757 /* 2. Zap current PTEs. */
2758 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2759
2760 /* 3. Do the exchange for non-contiguous MFNs. */
2761 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2762 0, out_frames, 0);
2763
2764 /* 4. Map new pages in place of old pages. */
2765 if (success)
2766 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2767 else
2768 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2769
2770 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2771}
2772EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
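/*
 * Matching teardown for the allocation sketch above (illustrative
 * only): hand the contiguous machine frames back before freeing the
 * pages to the kernel allocator.
 */
#if 0
static void example_free_dma32(void *buf, unsigned int order)
{
	xen_destroy_contiguous_region(virt_to_phys(buf), order);
	free_pages((unsigned long)buf, order);
}
#endif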
2773
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002774#ifdef CONFIG_XEN_PVHVM
Olaf Hering34b6f012012-10-01 21:18:01 +02002775#ifdef CONFIG_PROC_VMCORE
2776/*
2777 * This function is used in two contexts:
 2778 * - the kdump kernel has to check whether a pfn of the crashed kernel
 2779 * was a ballooned page. vmcore uses this function to decide
 2780 * whether to access a pfn of the crashed kernel.
 2781 * - the kexec kernel has to check whether a pfn was ballooned by the
 2782 * previous kernel. If the pfn is ballooned, handle it properly.
 2783 * Returns 0 if the pfn is not backed by a RAM page; the caller may
 2784 * handle the pfn specially in this case.
2785 */
2786static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2787{
2788 struct xen_hvm_get_mem_type a = {
2789 .domid = DOMID_SELF,
2790 .pfn = pfn,
2791 };
2792 int ram;
2793
2794 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2795 return -ENXIO;
2796
2797 switch (a.mem_type) {
2798 case HVMMEM_mmio_dm:
2799 ram = 0;
2800 break;
2801 case HVMMEM_ram_rw:
2802 case HVMMEM_ram_ro:
2803 default:
2804 ram = 1;
2805 break;
2806 }
2807
2808 return ram;
2809}
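/*
 * Sketch of the expected caller (illustrative; loosely paraphrasing
 * fs/proc/vmcore.c, not a verbatim copy): a pfn the callback reports
 * as non-RAM is never read from the old kernel, but returned to
 * userspace as zeroes instead.
 */
#if 0
	if (pfn_is_ram(pfn) == 0)
		memset(buf, 0, nr_bytes);	/* ballooned page: skip it */
	else
		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
#endif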
2810#endif
2811
Stefano Stabellini59151002010-06-17 14:22:52 +01002812static void xen_hvm_exit_mmap(struct mm_struct *mm)
2813{
2814 struct xen_hvm_pagetable_dying a;
2815 int rc;
2816
2817 a.domid = DOMID_SELF;
2818 a.gpa = __pa(mm->pgd);
2819 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2820 WARN_ON_ONCE(rc < 0);
2821}
2822
2823static int is_pagetable_dying_supported(void)
2824{
2825 struct xen_hvm_pagetable_dying a;
2826 int rc = 0;
2827
2828 a.domid = DOMID_SELF;
2829 a.gpa = 0x00;
2830 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2831 if (rc < 0) {
2832 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2833 return 0;
2834 }
2835 return 1;
2836}
2837
2838void __init xen_hvm_init_mmu_ops(void)
2839{
2840 if (is_pagetable_dying_supported())
2841 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
Olaf Hering34b6f012012-10-01 21:18:01 +02002842#ifdef CONFIG_PROC_VMCORE
2843 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2844#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002845}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002846#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002847
Ian Campbellde1ef202009-05-21 10:09:46 +01002848#define REMAP_BATCH_SIZE 16
2849
2850struct remap_data {
David Vrabel4e8c0c82015-03-11 14:49:57 +00002851 xen_pfn_t *mfn;
2852 bool contiguous;
Ian Campbellde1ef202009-05-21 10:09:46 +01002853 pgprot_t prot;
2854 struct mmu_update *mmu_update;
2855};
2856
2857static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2858 unsigned long addr, void *data)
2859{
2860 struct remap_data *rmd = data;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002861 pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
2862
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08002863 /* If we have a contiguous range, just update the mfn itself,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002864 otherwise advance the pointer to the next mfn. */
2865 if (rmd->contiguous)
2866 (*rmd->mfn)++;
2867 else
2868 rmd->mfn++;
Ian Campbellde1ef202009-05-21 10:09:46 +01002869
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002870 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002871 rmd->mmu_update->val = pte_val_ma(pte);
2872 rmd->mmu_update++;
2873
2874 return 0;
2875}
2876
Julien Gralla13d7202015-08-07 17:34:41 +01002877static int do_remap_gfn(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002878 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002879 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002880 int *err_ptr, pgprot_t prot,
2881 unsigned domid,
2882 struct page **pages)
Ian Campbellde1ef202009-05-21 10:09:46 +01002883{
David Vrabel4e8c0c82015-03-11 14:49:57 +00002884 int err = 0;
Ian Campbellde1ef202009-05-21 10:09:46 +01002885 struct remap_data rmd;
2886 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
Ian Campbellde1ef202009-05-21 10:09:46 +01002887 unsigned long range;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002888 int mapped = 0;
Ian Campbellde1ef202009-05-21 10:09:46 +01002889
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002890 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002891
Julien Gralla13d7202015-08-07 17:34:41 +01002892 rmd.mfn = gfn;
Ian Campbellde1ef202009-05-21 10:09:46 +01002893 rmd.prot = prot;
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08002894 /* We use err_ptr to indicate whether we are doing a contiguous
David Vrabel4e8c0c82015-03-11 14:49:57 +00002895 * mapping or a discontiguous mapping. */
2896 rmd.contiguous = !err_ptr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002897
2898 while (nr) {
David Vrabel4e8c0c82015-03-11 14:49:57 +00002899 int index = 0;
2900 int done = 0;
2901 int batch = min(REMAP_BATCH_SIZE, nr);
2902 int batch_left = batch;
Ian Campbellde1ef202009-05-21 10:09:46 +01002903 range = (unsigned long)batch << PAGE_SHIFT;
2904
2905 rmd.mmu_update = mmu_update;
2906 err = apply_to_page_range(vma->vm_mm, addr, range,
2907 remap_area_mfn_pte_fn, &rmd);
2908 if (err)
2909 goto out;
2910
David Vrabel4e8c0c82015-03-11 14:49:57 +00002911 /* Record the error for each page that fails, but continue
 2912 * mapping until the whole set has been processed. */
2913 do {
2914 int i;
2915
2916 err = HYPERVISOR_mmu_update(&mmu_update[index],
2917 batch_left, &done, domid);
2918
2919 /*
Julien Gralla13d7202015-08-07 17:34:41 +01002920 * @err_ptr may be the same buffer as @gfn, so
2921 * only clear it after each chunk of @gfn is
David Vrabel4e8c0c82015-03-11 14:49:57 +00002922 * used.
2923 */
2924 if (err_ptr) {
2925 for (i = index; i < index + done; i++)
2926 err_ptr[i] = 0;
2927 }
2928 if (err < 0) {
2929 if (!err_ptr)
2930 goto out;
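			/* The clearing loop above left i == index + done,
			 * i.e. indexing the frame that failed. */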
2931 err_ptr[i] = err;
2932 done++; /* Skip failed frame. */
2933 } else
2934 mapped += done;
2935 batch_left -= done;
2936 index += done;
2937 } while (batch_left);
Ian Campbellde1ef202009-05-21 10:09:46 +01002938
2939 nr -= batch;
2940 addr += range;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002941 if (err_ptr)
2942 err_ptr += batch;
David Vrabel914beb92015-10-28 13:39:05 +00002943 cond_resched();
Ian Campbellde1ef202009-05-21 10:09:46 +01002944 }
Ian Campbellde1ef202009-05-21 10:09:46 +01002945out:
2946
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04002947 xen_flush_tlb_all();
Ian Campbellde1ef202009-05-21 10:09:46 +01002948
David Vrabel4e8c0c82015-03-11 14:49:57 +00002949 return err < 0 ? err : mapped;
2950}
2951
Julien Gralla13d7202015-08-07 17:34:41 +01002952int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002953 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002954 xen_pfn_t gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002955 pgprot_t prot, unsigned domid,
2956 struct page **pages)
2957{
Julien Gralla13d7202015-08-07 17:34:41 +01002958 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
Ian Campbellde1ef202009-05-21 10:09:46 +01002959}
Julien Gralla13d7202015-08-07 17:34:41 +01002960EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
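/*
 * Usage sketch (illustrative, loosely modelled on the privcmd driver):
 * map nr consecutive foreign frames starting at first_gfn into an
 * mmap()ed VMA.  The variable names are hypothetical; note the VMA
 * must already be marked VM_IO | VM_PFNMAP, as do_remap_gfn() asserts.
 */
#if 0
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	rc = xen_remap_domain_gfn_range(vma, vma->vm_start, first_gfn, nr,
					vma->vm_page_prot, domid, NULL);
	if (rc < 0)
		return rc;
#endif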
Ian Campbell9a032e32012-10-17 13:37:49 -07002961
Julien Gralla13d7202015-08-07 17:34:41 +01002962int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002963 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002964 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002965 int *err_ptr, pgprot_t prot,
2966 unsigned domid, struct page **pages)
2967{
 2968 /* We BUG_ON because it's a programmer error to pass a NULL err_ptr;
 2969 * without per-frame errors it is very hard to work out later why
 2970 * the wrong memory was mapped in.
 2971 */
2972 BUG_ON(err_ptr == NULL);
Julien Gralla13d7202015-08-07 17:34:41 +01002973 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
David Vrabel4e8c0c82015-03-11 14:49:57 +00002974}
Julien Gralla13d7202015-08-07 17:34:41 +01002975EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
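/*
 * Array variant sketch (illustrative): map a scatter list of foreign
 * gfns and collect per-frame status.  As noted in do_remap_gfn(),
 * err_ptr may alias the gfn array; a separate, hypothetical errs[]
 * is used here for clarity.
 */
#if 0
	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfn_list,
					    nr, errs, vma->vm_page_prot,
					    domid, NULL);
	/* mapped < 0: overall failure; otherwise errs[i] flags frame i */
#endif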
David Vrabel4e8c0c82015-03-11 14:49:57 +00002976
2977
Ian Campbell9a032e32012-10-17 13:37:49 -07002978/* Returns: 0 success */
Julien Gralla13d7202015-08-07 17:34:41 +01002979int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
Ian Campbell9a032e32012-10-17 13:37:49 -07002980 int numpgs, struct page **pages)
2981{
2982 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2983 return 0;
2984
2985 return -EINVAL;
2986}
Julien Gralla13d7202015-08-07 17:34:41 +01002987EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);