/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

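/*
 * For illustration (hypothetical sketch, not an in-tree helper): how the
 * two per-cpu cr3 values above are typically consulted.  A vcpu inspecting
 * its own pagetable base can read this_cpu_read(xen_cr3), while code that
 * wants to know whether some *other* vcpu still references a pagetable
 * should compare against xen_current_cr3, e.g.:
 *
 *	if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
 *		;	// that cpu may still be using mm's pagetable
 *
 * This mirrors the check done later in xen_drop_mm_ref().
 */
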
static phys_addr_t xen_pt_base, xen_pt_size __initdata;

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

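/*
 * For illustration (hypothetical sketch, not an in-tree helper):
 * arbitrary_virt_to_machine() is the helper to reach for when an address
 * may come from vmalloc/ioremap space, where the linear-map shortcut used
 * by virt_to_machine() does not apply.  A caller building an mmu_update
 * for such a pointer might do:
 *
 *	struct mmu_update u;
 *
 *	u.ptr = arbitrary_virt_to_machine(ptep).maddr;
 *	u.val = pte_val_ma(pteval);
 *
 * which is the pattern xen_set_pmd_hyper()/xen_set_pud_hyper() use below
 * for possibly-ioremapped pagetable pointers.
 */
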
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

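/*
 * For illustration (hypothetical sketch, not an in-tree sequence): the
 * "extend" helpers above let several pagetable updates issued inside one
 * lazy-MMU section collapse into a single mmu_update hypercall.  Roughly:
 *
 *	xen_mc_batch();
 *	xen_extend_mmu_update(&u1);	// opens a new mmu_update multicall
 *	xen_extend_mmu_update(&u2);	// appended to the same multicall
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * The set_* helpers below batch their individual updates this way.
 */
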
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = __pfn_to_mfn(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

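/*
 * For illustration (hypothetical values): the asymmetry noted above means a
 * round trip through these helpers is not guaranteed to be the identity.
 * For a present pte whose pfn has no machine frame backing it:
 *
 *	pteval_t v = some_pfn_based_pte_val;	// hypothetical starting value
 *	v = pte_pfn_to_mfn(v);			// becomes 0 (non-present)
 *	v = pte_mfn_to_pfn(v);			// original pfn is gone
 *
 * so callers cannot rely on recovering the original pfn once a pte has been
 * converted to machine form and back.
 */
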
__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

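/*
 * For illustration (hypothetical sketch, not an in-tree callback): callbacks
 * passed to xen_pgd_walk()/__xen_pgd_walk() receive the mm, the struct page
 * of one pagetable page, and its level, and return non-zero if the walk
 * should end with a TLB flush.  A callback that only tags pages might be:
 *
 *	static int example_tag_page(struct mm_struct *mm, struct page *page,
 *				    enum pt_level level)
 *	{
 *		SetPagePinned(page);
 *		return 0;		// no flush required
 *	}
 *
 * xen_mark_pinned() below is the real in-tree instance of this shape.
 */
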
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the pte
		 * page is unpinned but still RO to prevent concurrent
		 * updates from seeing it in this partially-pinned
		 * state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

#ifdef CONFIG_X86_64
static void __init xen_cleanhighmap(unsigned long vaddr,
				    unsigned long vaddr_end)
{
	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
	 * We include the PMD passed in on _both_ boundaries. */
	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
			pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
			set_pmd(pmd, __pmd(0));
	}
	/* In case we did something silly, we should crash in this function
	 * instead of somewhere later and be confusing. */
	xen_mc_flush();
}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
	void *vaddr = __va(paddr);
	void *vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
		make_lowmem_page_readwrite(vaddr);

	memblock_free(paddr, size);
}

static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;

	if (unpin)
		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
	ClearPagePinned(virt_to_page(__va(pa)));
	xen_free_ro_pages(pa, PAGE_SIZE);
}

/*
 * Since it is well isolated we can (and since it is perhaps large we should)
 * also free the page tables mapping the initial P->M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
	unsigned long va = vaddr & PMD_MASK;
	unsigned long pa;
	pgd_t *pgd = pgd_offset_k(va);
	pud_t *pud_page = pud_offset(pgd, 0);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int i;
	bool unpin;

	unpin = (vaddr == 2 * PGDIR_SIZE);
	set_pgd(pgd, __pgd(0));
	do {
		pud = pud_page + pud_index(va);
		if (pud_none(*pud)) {
			va += PUD_SIZE;
		} else if (pud_large(*pud)) {
			pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
			xen_free_ro_pages(pa, PUD_SIZE);
			va += PUD_SIZE;
		} else {
			pmd = pmd_offset(pud, va);
			if (pmd_large(*pmd)) {
				pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
				xen_free_ro_pages(pa, PMD_SIZE);
			} else if (!pmd_none(*pmd)) {
				pte = pte_offset_kernel(pmd, va);
				set_pmd(pmd, __pmd(0));
				for (i = 0; i < PTRS_PER_PTE; ++i) {
					if (pte_none(pte[i]))
						break;
					pa = pte_pfn(pte[i]) << PAGE_SHIFT;
					xen_free_ro_pages(pa, PAGE_SIZE);
				}
				xen_cleanmfnmap_free_pgtbl(pte, unpin);
			}
			va += PMD_SIZE;
			if (pmd_index(va))
				continue;
			set_pud(pud, __pud(0));
			xen_cleanmfnmap_free_pgtbl(pmd, unpin);
		}

	} while (pud_index(va) || pmd_index(va));
	xen_cleanmfnmap_free_pgtbl(pud_page, unpin);
}
1205
Juergen Gross054954e2014-11-28 11:53:58 +01001206static void __init xen_pagetable_p2m_free(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001207{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001208 unsigned long size;
1209 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001210
1211 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1212
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001213 /* No memory or already called. */
Juergen Gross054954e2014-11-28 11:53:58 +01001214 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001215 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001216
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001217 /* Using the __ka address, fill it with INVALID_P2M_ENTRY. */
1218 memset((void *)xen_start_info->mfn_list, 0xff, size);
1219
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001220 addr = xen_start_info->mfn_list;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001221 /*
1222 * We could be in __ka space.
1223 * We round up to the PMD, which means that if anybody at this stage is
1224 * using the __ka address of xen_start_info or
1225 * xen_start_info->shared_info they are going to crash. Fortunately
1226 * we have already revectored in xen_setup_kernel_pagetable and in
1227 * xen_setup_shared_info.
1228 */
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001229 size = roundup(size, PMD_SIZE);
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001230
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001231 if (addr >= __START_KERNEL_map) {
1232 xen_cleanhighmap(addr, addr + size);
1233 size = PAGE_ALIGN(xen_start_info->nr_pages *
1234 sizeof(unsigned long));
1235 memblock_free(__pa(addr), size);
1236 } else {
1237 xen_cleanmfnmap(addr);
1238 }
Juergen Gross70e61192015-07-17 06:51:35 +02001239}
1240
1241static void __init xen_pagetable_cleanhighmap(void)
1242{
1243 unsigned long size;
1244 unsigned long addr;
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001245
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001246 /* At this stage, cleanup_highmap has already cleaned __ka space
1247 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1248 * the ramdisk). We continue on, erasing PMD entries that point to page
1249 * tables - do note that they are accessible at this stage via __va.
1250 * For good measure we also round up to the PMD - which means that if
1251 * anybody using a __ka address for the initial boot-stack and trying
1252 * to use it is going to crash. The xen_start_info has been
1253 * taken care of already in xen_setup_kernel_pagetable. */
1254 addr = xen_start_info->pt_base;
1255 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1256
1257 xen_cleanhighmap(addr, addr + size);
1258 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1259#ifdef DEBUG
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08001260 /* This is strictly superfluous, but let's do it anyway for good
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001261 * measure. The MODULES_VADDR -> MODULES_END range should be clear of
1262 * anything at this stage. */
1263 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1264#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001265}
1266#endif
1267
Juergen Gross054954e2014-11-28 11:53:58 +01001268static void __init xen_pagetable_p2m_setup(void)
1269{
1270 if (xen_feature(XENFEAT_auto_translated_physmap))
1271 return;
1272
1273 xen_vmalloc_p2m_tree();
1274
1275#ifdef CONFIG_X86_64
1276 xen_pagetable_p2m_free();
Juergen Gross70e61192015-07-17 06:51:35 +02001277
1278 xen_pagetable_cleanhighmap();
Juergen Gross054954e2014-11-28 11:53:58 +01001279#endif
1280 /* And revector! Bye bye old array */
1281 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1282}
1283
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001284static void __init xen_pagetable_init(void)
1285{
1286 paging_init();
Juergen Grosscdfa0ba2014-12-10 16:56:03 +01001287 xen_post_allocator_init();
Juergen Gross054954e2014-11-28 11:53:58 +01001288
1289 xen_pagetable_p2m_setup();
1290
Juergen Gross2c185682014-10-14 13:33:46 +02001291 /* Allocate and initialize top and mid mfn levels for p2m structure */
1292 xen_build_mfn_list_list();
1293
Juergen Gross1f3ac862014-11-28 11:53:53 +01001294 /* Remap memory freed due to conflicts with E820 map */
1295 if (!xen_feature(XENFEAT_auto_translated_physmap))
1296 xen_remap_memory();
1297
Juergen Gross2c185682014-10-14 13:33:46 +02001298 xen_setup_shared_info();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001299}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001300static void xen_write_cr2(unsigned long cr2)
1301{
Alex Shi2113f462012-01-13 23:53:35 +08001302 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001303}
1304
1305static unsigned long xen_read_cr2(void)
1306{
Alex Shi2113f462012-01-13 23:53:35 +08001307 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001308}
1309
1310unsigned long xen_read_cr2_direct(void)
1311{
Alex Shi2113f462012-01-13 23:53:35 +08001312 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001313}
1314
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001315void xen_flush_tlb_all(void)
1316{
1317 struct mmuext_op *op;
1318 struct multicall_space mcs;
1319
1320 trace_xen_mmu_flush_tlb_all(0);
1321
1322 preempt_disable();
1323
1324 mcs = xen_mc_entry(sizeof(*op));
1325
1326 op = mcs.args;
1327 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1328 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1329
1330 xen_mc_issue(PARAVIRT_LAZY_MMU);
1331
1332 preempt_enable();
1333}
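
/*
 * For comparison, a minimal non-batched sketch of the same flush (the
 * function name is hypothetical; the hypercall and MMUEXT_TLB_FLUSH_ALL
 * are as used elsewhere in this file). The multicall version above is
 * preferred because it can coalesce with other pending MMU operations
 * and pay for only one trip into the hypervisor.
 */
static void xen_flush_tlb_all_direct(void)
{
	struct mmuext_op op = {
		.cmd = MMUEXT_TLB_FLUSH_ALL,
	};

	/* Synchronous hypercall: one operation, no batching. */
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}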
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001334static void xen_flush_tlb(void)
1335{
1336 struct mmuext_op *op;
1337 struct multicall_space mcs;
1338
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001339 trace_xen_mmu_flush_tlb(0);
1340
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001341 preempt_disable();
1342
1343 mcs = xen_mc_entry(sizeof(*op));
1344
1345 op = mcs.args;
1346 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1347 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1348
1349 xen_mc_issue(PARAVIRT_LAZY_MMU);
1350
1351 preempt_enable();
1352}
1353
1354static void xen_flush_tlb_single(unsigned long addr)
1355{
1356 struct mmuext_op *op;
1357 struct multicall_space mcs;
1358
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001359 trace_xen_mmu_flush_tlb_single(addr);
1360
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001361 preempt_disable();
1362
1363 mcs = xen_mc_entry(sizeof(*op));
1364 op = mcs.args;
1365 op->cmd = MMUEXT_INVLPG_LOCAL;
1366 op->arg1.linear_addr = addr & PAGE_MASK;
1367 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1368
1369 xen_mc_issue(PARAVIRT_LAZY_MMU);
1370
1371 preempt_enable();
1372}
1373
1374static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001375 struct mm_struct *mm, unsigned long start,
1376 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001377{
1378 struct {
1379 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001380#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001381 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001382#else
1383 DECLARE_BITMAP(mask, NR_CPUS);
1384#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001385 } *args;
1386 struct multicall_space mcs;
1387
Alex Shie7b52ff2012-06-28 09:02:17 +08001388 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001389
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001390 if (cpumask_empty(cpus))
1391 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001392
1393 mcs = xen_mc_entry(sizeof(*args));
1394 args = mcs.args;
1395 args->op.arg2.vcpumask = to_cpumask(args->mask);
1396
1397 /* Remove us, and any offline CPUS. */
1398 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1399 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001400
Alex Shie7b52ff2012-06-28 09:02:17 +08001401 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001402 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001403 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001404 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001405 }
1406
1407 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1408
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001409 xen_mc_issue(PARAVIRT_LAZY_MMU);
1410}
1411
1412static unsigned long xen_read_cr3(void)
1413{
Alex Shi2113f462012-01-13 23:53:35 +08001414 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001415}
1416
1417static void set_current_cr3(void *v)
1418{
Alex Shi2113f462012-01-13 23:53:35 +08001419 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001420}
1421
1422static void __xen_write_cr3(bool kernel, unsigned long cr3)
1423{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001424 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001425 unsigned long mfn;
1426
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001427 trace_xen_mmu_write_cr3(kernel, cr3);
1428
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001429 if (cr3)
1430 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1431 else
1432 mfn = 0;
1433
1434 WARN_ON(mfn == 0 && kernel);
1435
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001436 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1437 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001438
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001439 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001440
1441 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001442 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001443
1444 /* Update xen_current_cr3 once the batch has actually
1445 been submitted. */
1446 xen_mc_callback(set_current_cr3, (void *)cr3);
1447 }
1448}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001449static void xen_write_cr3(unsigned long cr3)
1450{
1451 BUG_ON(preemptible());
1452
1453 xen_mc_batch(); /* disables interrupts */
1454
1455 /* Update while interrupts are disabled, so it's atomic with
1456 respect to IPIs */
Alex Shi2113f462012-01-13 23:53:35 +08001457 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001458
1459 __xen_write_cr3(true, cr3);
1460
1461#ifdef CONFIG_X86_64
1462 {
1463 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1464 if (user_pgd)
1465 __xen_write_cr3(false, __pa(user_pgd));
1466 else
1467 __xen_write_cr3(false, 0);
1468 }
1469#endif
1470
1471 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1472}
1473
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001474#ifdef CONFIG_X86_64
1475/*
1476 * At the start of the day, when Xen launches a guest, it has already
1477 * built pagetables for the guest. We diligently look over them
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08001478 * in xen_setup_kernel_pagetable and graft them as appropriate into the
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001479 * init_level4_pgt and its friends. Then when we are happy we load
1480 * the new init_level4_pgt - and continue on.
1481 *
1482 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1483 * up the rest of the pagetables. When it has completed it loads the cr3.
1484 * N.B. that baremetal would start at 'start_kernel' (and the early
1485 * #PF handler would create bootstrap pagetables) - so we are running
1486 * with the same assumptions as what to do when write_cr3 is executed
1487 * at this point.
1488 *
1489 * Since there are no user-page tables at all, we have two variants
1490 * of xen_write_cr3 - the early bootup (this one), and the late one
1491 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1492 * the Linux kernel and user-space are both in ring 3 while the
1493 * hypervisor is in ring 0.
1494 */
1495static void __init xen_write_cr3_init(unsigned long cr3)
1496{
1497 BUG_ON(preemptible());
1498
1499 xen_mc_batch(); /* disables interrupts */
1500
1501 /* Update while interrupts are disabled, so it's atomic with
1502 respect to IPIs */
1503 this_cpu_write(xen_cr3, cr3);
1504
1505 __xen_write_cr3(true, cr3);
1506
1507 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001508}
1509#endif
1510
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001511static int xen_pgd_alloc(struct mm_struct *mm)
1512{
1513 pgd_t *pgd = mm->pgd;
1514 int ret = 0;
1515
1516 BUG_ON(PagePinned(virt_to_page(pgd)));
1517
1518#ifdef CONFIG_X86_64
1519 {
1520 struct page *page = virt_to_page(pgd);
1521 pgd_t *user_pgd;
1522
1523 BUG_ON(page->private != 0);
1524
1525 ret = -ENOMEM;
1526
1527 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1528 page->private = (unsigned long)user_pgd;
1529
1530 if (user_pgd != NULL) {
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001531#ifdef CONFIG_X86_VSYSCALL_EMULATION
Andy Lutomirskif40c3302014-05-05 12:19:36 -07001532 user_pgd[pgd_index(VSYSCALL_ADDR)] =
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001533 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001534#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001535 ret = 0;
1536 }
1537
1538 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1539 }
1540#endif
1541
1542 return ret;
1543}
1544
1545static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1546{
1547#ifdef CONFIG_X86_64
1548 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1549
1550 if (user_pgd)
1551 free_page((unsigned long)user_pgd);
1552#endif
1553}
1554
David Vrabeld095d432012-07-09 11:39:05 +01001555/*
1556 * Init-time set_pte while constructing initial pagetables, which
1557 * doesn't allow RO page table pages to be remapped RW.
1558 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001559 * If there is no MFN for this PFN then this page is initially
1560 * ballooned out so clear the PTE (as in decrease_reservation() in
1561 * drivers/xen/balloon.c).
1562 *
David Vrabeld095d432012-07-09 11:39:05 +01001563 * Many of these PTE updates are done on unpinned and writable pages
1564 * and doing a hypercall for these is unnecessary and expensive. At
1565 * this point it is not possible to tell if a page is pinned or not,
1566 * so always write the PTE directly and rely on Xen trapping and
1567 * emulating any updates as necessary.
1568 */
David Vrabeld6b186c2016-05-17 15:54:50 +01001569__visible pte_t xen_make_pte_init(pteval_t pte)
1570{
1571#ifdef CONFIG_X86_64
1572 unsigned long pfn;
1573
1574 /*
1575 * Pages belonging to the initial p2m list mapped outside the default
1576 * address range must be mapped read-only. This region contains the
1577 * page tables for mapping the p2m list, too, and page tables MUST be
1578 * mapped read-only.
1579 */
1580 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1581 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1582 pfn >= xen_start_info->first_p2m_pfn &&
1583 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1584 pte &= ~_PAGE_RW;
1585#endif
1586 pte = pte_pfn_to_mfn(pte);
1587 return native_make_pte(pte);
1588}
1589PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
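
/*
 * Conceptual sketch only: pte_pfn_to_mfn() used above essentially swaps
 * the guest pfn embedded in a pte value for the corresponding machine
 * frame, leaving the flag bits alone. The real helper also deals with
 * INVALID_P2M_ENTRY and non-present ptes; this simplified version (the
 * name is hypothetical) just shows the basic substitution.
 */
static inline pteval_t example_pte_pfn_to_mfn(pteval_t val)
{
	unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;

	/* Keep the flag bits, replace the frame number with the mfn. */
	return (val & ~PTE_PFN_MASK) |
	       ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT);
}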
1590
Daniel Kiper3f5089532011-05-12 17:19:53 -04001591static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001592{
David Vrabeld6b186c2016-05-17 15:54:50 +01001593#ifdef CONFIG_X86_32
1594 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1595 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1596 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1597 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1598 pte_val_ma(pte));
1599#endif
David Vrabeld095d432012-07-09 11:39:05 +01001600 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001601}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001602
1603/* Early in boot, while setting up the initial pagetable, assume
1604 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001605static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001606{
1607#ifdef CONFIG_FLATMEM
1608 BUG_ON(mem_map); /* should only be used early */
1609#endif
1610 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001611 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1612}
1613
1614/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001615static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001616{
1617#ifdef CONFIG_FLATMEM
1618 BUG_ON(mem_map); /* should only be used early */
1619#endif
1620 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001621}
1622
1623/* Early release_pte assumes that all pts are pinned, since there's
1624 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001625static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001626{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001627 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001628 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1629}
1630
Daniel Kiper3f5089532011-05-12 17:19:53 -04001631static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001632{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001633 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001634}
1635
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001636static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1637{
1638 struct multicall_space mcs;
1639 struct mmuext_op *op;
1640
1641 mcs = __xen_mc_entry(sizeof(*op));
1642 op = mcs.args;
1643 op->cmd = cmd;
1644 op->arg1.mfn = pfn_to_mfn(pfn);
1645
1646 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1647}
1648
1649static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1650{
1651 struct multicall_space mcs;
1652 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1653
1654 mcs = __xen_mc_entry(0);
1655 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1656 pfn_pte(pfn, prot), 0);
1657}
1658
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001659/* This needs to make sure the new pte page is pinned iff it's being
1660 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001661static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1662 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001663{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001664 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001665
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001666 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001667
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001668 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001669 struct page *page = pfn_to_page(pfn);
1670
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001671 SetPagePinned(page);
1672
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001673 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001674 xen_mc_batch();
1675
1676 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1677
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001678 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001679 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1680
1681 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001682 } else {
1683 /* make sure there are no stray mappings of
1684 this page */
1685 kmap_flush_unused();
1686 }
1687 }
1688}
1689
1690static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1691{
1692 xen_alloc_ptpage(mm, pfn, PT_PTE);
1693}
1694
1695static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1696{
1697 xen_alloc_ptpage(mm, pfn, PT_PMD);
1698}
1699
1700/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001701static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001702{
1703 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001704 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001705
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001706 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1707
1708 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001709 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001710 xen_mc_batch();
1711
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001712 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001713 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1714
1715 __set_pfn_prot(pfn, PAGE_KERNEL);
1716
1717 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001718 }
1719 ClearPagePinned(page);
1720 }
1721}
1722
1723static void xen_release_pte(unsigned long pfn)
1724{
1725 xen_release_ptpage(pfn, PT_PTE);
1726}
1727
1728static void xen_release_pmd(unsigned long pfn)
1729{
1730 xen_release_ptpage(pfn, PT_PMD);
1731}
1732
Kirill A. Shutemov98233362015-04-14 15:46:14 -07001733#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001734static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1735{
1736 xen_alloc_ptpage(mm, pfn, PT_PUD);
1737}
1738
1739static void xen_release_pud(unsigned long pfn)
1740{
1741 xen_release_ptpage(pfn, PT_PUD);
1742}
1743#endif
1744
1745void __init xen_reserve_top(void)
1746{
1747#ifdef CONFIG_X86_32
1748 unsigned long top = HYPERVISOR_VIRT_START;
1749 struct xen_platform_parameters pp;
1750
1751 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1752 top = pp.virt_start;
1753
1754 reserve_top_address(-top);
1755#endif /* CONFIG_X86_32 */
1756}
1757
1758/*
1759 * Like __va(), but returns address in the kernel mapping (which is
1760 * all we have until the physical memory mapping has been set up.
1761 */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001762static void * __init __ka(phys_addr_t paddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001763{
1764#ifdef CONFIG_X86_64
1765 return (void *)(paddr + __START_KERNEL_map);
1766#else
1767 return __va(paddr);
1768#endif
1769}
1770
1771/* Convert a machine address to physical address */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001772static unsigned long __init m2p(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001773{
1774 phys_addr_t paddr;
1775
1776 maddr &= PTE_PFN_MASK;
1777 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1778
1779 return paddr;
1780}
1781
1782/* Convert a machine address to kernel virtual */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001783static void * __init m2v(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001784{
1785 return __ka(m2p(maddr));
1786}
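
/*
 * Worked example (hypothetical helper, not part of this file): walking a
 * level of a Xen-provided pagetable goes through m2v() because the entries
 * hold machine addresses, not physical ones. This mirrors the pattern used
 * in xen_setup_kernel_pagetable() below.
 */
static pmd_t * __init example_pmd_from_pgd(pgd_t *pgd, unsigned long va)
{
	/* pgd entry holds a machine address of the L3 table. */
	pud_t *l3 = m2v(pgd[pgd_index(va)].pgd);

	/* L3 entry likewise holds a machine address of the L2 table. */
	return m2v(l3[pud_index(va)].pud);
}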
1787
Juan Quintela4ec53872010-09-02 15:45:43 +01001788/* Set the page permissions on identity-mapped pages */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001789static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1790 unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001791{
1792 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1793 pte_t pte = pfn_pte(pfn, prot);
1794
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001795 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1796 if (xen_feature(XENFEAT_auto_translated_physmap))
1797 return;
1798
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001799 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001800 BUG();
1801}
Juergen Grossbf9d8342015-01-28 07:44:24 +01001802static void __init set_page_prot(void *addr, pgprot_t prot)
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001803{
1804 return set_page_prot_flags(addr, prot, UVMF_NONE);
1805}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001806#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001807static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001808{
1809 unsigned pmdidx, pteidx;
1810 unsigned ident_pte;
1811 unsigned long pfn;
1812
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001813 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1814 PAGE_SIZE);
1815
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001816 ident_pte = 0;
1817 pfn = 0;
1818 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1819 pte_t *pte_page;
1820
1821 /* Reuse or allocate a page of ptes */
1822 if (pmd_present(pmd[pmdidx]))
1823 pte_page = m2v(pmd[pmdidx].pmd);
1824 else {
1825 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001826 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001827 break;
1828
1829 pte_page = &level1_ident_pgt[ident_pte];
1830 ident_pte += PTRS_PER_PTE;
1831
1832 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1833 }
1834
1835 /* Install mappings */
1836 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1837 pte_t pte;
1838
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001839 if (pfn > max_pfn_mapped)
1840 max_pfn_mapped = pfn;
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001841
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001842 if (!pte_none(pte_page[pteidx]))
1843 continue;
1844
1845 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1846 pte_page[pteidx] = pte;
1847 }
1848 }
1849
1850 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1851 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1852
1853 set_page_prot(pmd, PAGE_KERNEL_RO);
1854}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001855#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001856void __init xen_setup_machphys_mapping(void)
1857{
1858 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001859
1860 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1861 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001862 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001863 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001864 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001865 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001866#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001867 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1868 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001869#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001870}
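
/*
 * Illustration only: conceptually, an m2p lookup is an index into the
 * machine_to_phys_mapping table set up above, bounded by machine_to_phys_nr.
 * The real mfn_to_pfn() helper also handles identity and foreign frames, so
 * this hypothetical function is only a sketch of the common case.
 */
static inline unsigned long example_m2p_lookup(unsigned long mfn)
{
	if (mfn >= machine_to_phys_nr)
		return ~0UL;	/* no pfn known for this machine frame */

	return machine_to_phys_mapping[mfn];
}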
1871
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001872#ifdef CONFIG_X86_64
Juergen Grossbf9d8342015-01-28 07:44:24 +01001873static void __init convert_pfn_mfn(void *v)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001874{
1875 pte_t *pte = v;
1876 int i;
1877
1878 /* All levels are converted the same way, so just treat them
1879 as ptes. */
1880 for (i = 0; i < PTRS_PER_PTE; i++)
1881 pte[i] = xen_make_pte(pte[i].pte);
1882}
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001883static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1884 unsigned long addr)
1885{
1886 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001887 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001888 clear_page((void *)addr);
1889 (*pt_base)++;
1890 }
1891 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001892 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001893 clear_page((void *)addr);
1894 (*pt_end)--;
1895 }
1896}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001897/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001898 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001899 *
1900 * We can construct this by grafting the Xen provided pagetable into
1901 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
Stefan Bader0b5a5062014-09-02 11:16:01 +01001902 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1903 * kernel has a physical mapping to start with - but that's enough to
1904 * get __va working. We need to fill in the rest of the physical
1905 * mapping once some sort of allocator has been set up. NOTE: for
1906 * PVH, the page tables are native.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001907 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001908void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001909{
1910 pud_t *l3;
1911 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001912 unsigned long addr[3];
1913 unsigned long pt_base, pt_end;
1914 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001915
Stefano Stabellini14988a42011-02-18 11:32:40 +00001916 /* max_pfn_mapped is the last pfn mapped in the initial memory
1917 * mappings. Considering that on Xen after the kernel mappings we
1918 * have the mappings of some pages that don't exist in pfn space, we
1919 * set max_pfn_mapped to the last real pfn mapped. */
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001920 if (xen_start_info->mfn_list < __START_KERNEL_map)
1921 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1922 else
1923 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
Stefano Stabellini14988a42011-02-18 11:32:40 +00001924
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001925 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1926 pt_end = pt_base + xen_start_info->nr_pt_frames;
1927
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001928 /* Zap identity mapping */
1929 init_level4_pgt[0] = __pgd(0);
1930
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001931 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1932 /* Pre-constructed entries are in pfn, so convert to mfn */
1933 /* L4[272] -> level3_ident_pgt
1934 * L4[511] -> level3_kernel_pgt */
1935 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001936
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001937 /* L3_i[0] -> level2_ident_pgt */
1938 convert_pfn_mfn(level3_ident_pgt);
1939 /* L3_k[510] -> level2_kernel_pgt
Stefan Bader0b5a5062014-09-02 11:16:01 +01001940 * L3_k[511] -> level2_fixmap_pgt */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001941 convert_pfn_mfn(level3_kernel_pgt);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001942
1943 /* L3_k[511][506] -> level1_fixmap_pgt */
1944 convert_pfn_mfn(level2_fixmap_pgt);
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001945 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001946 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001947 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1948 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1949
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001950 addr[0] = (unsigned long)pgd;
1951 addr[1] = (unsigned long)l3;
1952 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001953 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
Stefan Bader0b5a5062014-09-02 11:16:01 +01001954 * Both L4[272][0] and L4[511][510] have entries that point to the same
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001955 * L2 (PMD) tables. This means that if you modify it in __va space
1956 * it will also be modified in the __ka space! (But if you just
1957 * modify the PMD table to point to other PTEs or none, then you
1958 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001959 copy_page(level2_ident_pgt, l2);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001960 /* Graft it onto L4[511][510] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001961 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001962
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001963 /* Copy the initial P->M table mappings if necessary. */
1964 i = pgd_index(xen_start_info->mfn_list);
1965 if (i && i < pgd_index(__START_KERNEL_map))
1966 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1967
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001968 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1969 /* Make pagetable pieces RO */
1970 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1971 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1972 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1973 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1974 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1975 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1976 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001977 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001978
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001979 /* Pin down new L4 */
1980 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1981 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001982
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001983 /* Unpin Xen-provided one */
1984 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001985
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001986 /*
1987 * At this stage there can be no user pgd, and no page
1988 * structure to attach it to, so make sure we just set kernel
1989 * pgd.
1990 */
1991 xen_mc_batch();
1992 __xen_write_cr3(true, __pa(init_level4_pgt));
1993 xen_mc_issue(PARAVIRT_LAZY_CPU);
1994 } else
1995 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001996
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001997 /* We can't rip out L3 and L2 that easily, as the Xen pagetables are
1998 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1999 * the initial domain. For guests using the toolstack, they are in:
2000 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
2001 * rip out the [L4] (pgd), but for guests we shave off three pages.
2002 */
2003 for (i = 0; i < ARRAY_SIZE(addr); i++)
2004 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002005
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04002006 /* The Xen pagetable we are using, now up to three pages smaller */
Juergen Gross04414ba2015-07-17 06:51:31 +02002007 xen_pt_base = PFN_PHYS(pt_base);
2008 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
2009 memblock_reserve(xen_pt_base, xen_pt_size);
Juergen Gross70e61192015-07-17 06:51:35 +02002010
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04002011 /* Revector the xen_start_info */
2012 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002013}
Juergen Gross70e61192015-07-17 06:51:35 +02002014
2015/*
2016 * Read a value from a physical address.
2017 */
2018static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
2019{
2020 unsigned long *vaddr;
2021 unsigned long val;
2022
2023 vaddr = early_memremap_ro(addr, sizeof(val));
2024 val = *vaddr;
2025 early_memunmap(vaddr, sizeof(val));
2026 return val;
2027}
2028
2029/*
2030 * Translate a virtual address to a physical one without relying on mapped
2031 * page tables.
2032 */
2033static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2034{
2035 phys_addr_t pa;
2036 pgd_t pgd;
2037 pud_t pud;
2038 pmd_t pmd;
2039 pte_t pte;
2040
2041 pa = read_cr3();
2042 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2043 sizeof(pgd)));
2044 if (!pgd_present(pgd))
2045 return 0;
2046
2047 pa = pgd_val(pgd) & PTE_PFN_MASK;
2048 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2049 sizeof(pud)));
2050 if (!pud_present(pud))
2051 return 0;
2052 pa = pud_pfn(pud) << PAGE_SHIFT;
2053 if (pud_large(pud))
2054 return pa + (vaddr & ~PUD_MASK);
2055
2056 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2057 sizeof(pmd)));
2058 if (!pmd_present(pmd))
2059 return 0;
2060 pa = pmd_pfn(pmd) << PAGE_SHIFT;
2061 if (pmd_large(pmd))
2062 return pa + (vaddr & ~PMD_MASK);
2063
2064 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2065 sizeof(pte)));
2066 if (!pte_present(pte))
2067 return 0;
2068 pa = pte_pfn(pte) << PAGE_SHIFT;
2069
2070 return pa | (vaddr & ~PAGE_MASK);
2071}
2072
2073/*
2074 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
2075 * this area.
2076 */
2077void __init xen_relocate_p2m(void)
2078{
2079 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
2080 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2081 int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
2082 pte_t *pt;
2083 pmd_t *pmd;
2084 pud_t *pud;
2085 pgd_t *pgd;
2086 unsigned long *new_p2m;
2087
2088 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2089 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2090 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2091 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2092 n_pud = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
2093 n_frames = n_pte + n_pt + n_pmd + n_pud;
2094
2095 new_area = xen_find_free_area(PFN_PHYS(n_frames));
2096 if (!new_area) {
2097 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2098 BUG();
2099 }
2100
2101 /*
2102 * Set up the page tables for addressing the new p2m list.
2103 * We have asked the hypervisor to map the p2m list at the user address
2104 * PUD_SIZE. It may have done so, or it may have used a kernel space
2105 * address depending on the Xen version.
2106 * To avoid any possible virtual address collision, just use
2107 * 2 * PUD_SIZE for the new area.
2108 */
2109 pud_phys = new_area;
2110 pmd_phys = pud_phys + PFN_PHYS(n_pud);
2111 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2112 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2113
2114 pgd = __va(read_cr3());
2115 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2116 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2117 pud = early_memremap(pud_phys, PAGE_SIZE);
2118 clear_page(pud);
2119 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2120 idx_pmd++) {
2121 pmd = early_memremap(pmd_phys, PAGE_SIZE);
2122 clear_page(pmd);
2123 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2124 idx_pt++) {
2125 pt = early_memremap(pt_phys, PAGE_SIZE);
2126 clear_page(pt);
2127 for (idx_pte = 0;
2128 idx_pte < min(n_pte, PTRS_PER_PTE);
2129 idx_pte++) {
2130 set_pte(pt + idx_pte,
2131 pfn_pte(p2m_pfn, PAGE_KERNEL));
2132 p2m_pfn++;
2133 }
2134 n_pte -= PTRS_PER_PTE;
2135 early_memunmap(pt, PAGE_SIZE);
2136 make_lowmem_page_readonly(__va(pt_phys));
2137 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2138 PFN_DOWN(pt_phys));
2139 set_pmd(pmd + idx_pt,
2140 __pmd(_PAGE_TABLE | pt_phys));
2141 pt_phys += PAGE_SIZE;
2142 }
2143 n_pt -= PTRS_PER_PMD;
2144 early_memunmap(pmd, PAGE_SIZE);
2145 make_lowmem_page_readonly(__va(pmd_phys));
2146 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2147 PFN_DOWN(pmd_phys));
2148 set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2149 pmd_phys += PAGE_SIZE;
2150 }
2151 n_pmd -= PTRS_PER_PUD;
2152 early_memunmap(pud, PAGE_SIZE);
2153 make_lowmem_page_readonly(__va(pud_phys));
2154 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2155 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2156 pud_phys += PAGE_SIZE;
2157 }
2158
2159 /* Now copy the old p2m info to the new area. */
2160 memcpy(new_p2m, xen_p2m_addr, size);
2161 xen_p2m_addr = new_p2m;
2162
2163 /* Release the old p2m list and set new list info. */
2164 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2165 BUG_ON(!p2m_pfn);
2166 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2167
2168 if (xen_start_info->mfn_list < __START_KERNEL_map) {
2169 pfn = xen_start_info->first_p2m_pfn;
2170 pfn_end = xen_start_info->first_p2m_pfn +
2171 xen_start_info->nr_p2m_frames;
2172 set_pgd(pgd + 1, __pgd(0));
2173 } else {
2174 pfn = p2m_pfn;
2175 pfn_end = p2m_pfn_end;
2176 }
2177
2178 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2179 while (pfn < pfn_end) {
2180 if (pfn == p2m_pfn) {
2181 pfn = p2m_pfn_end;
2182 continue;
2183 }
2184 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2185 pfn++;
2186 }
2187
2188 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2189 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2190 xen_start_info->nr_p2m_frames = n_frames;
2191}
2192
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002193#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002194static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2195static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2196
Daniel Kiper3f5089532011-05-12 17:19:53 -04002197static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002198{
2199 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2200
2201 BUG_ON(read_cr3() != __pa(initial_page_table));
2202 BUG_ON(cr3 != __pa(swapper_pg_dir));
2203
2204 /*
2205 * We are switching to swapper_pg_dir for the first time (from
2206 * initial_page_table) and therefore need to mark that page
2207 * read-only and then pin it.
2208 *
2209 * Xen disallows sharing of kernel PMDs for PAE
2210 * guests. Therefore we must copy the kernel PMD from
2211 * initial_page_table into a new kernel PMD to be used in
2212 * swapper_pg_dir.
2213 */
2214 swapper_kernel_pmd =
2215 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002216 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002217 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2218 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2219 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2220
2221 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2222 xen_write_cr3(cr3);
2223 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2224
2225 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2226 PFN_DOWN(__pa(initial_page_table)));
2227 set_page_prot(initial_page_table, PAGE_KERNEL);
2228 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2229
2230 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2231}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002232
Juergen Gross70e61192015-07-17 06:51:35 +02002233/*
2234 * For 32 bit domains xen_start_info->pt_base is the pgd address which might be
2235 * not the first page table in the page table pool.
2236 * Iterate through the initial page tables to find the real page table base.
2237 */
2238static phys_addr_t xen_find_pt_base(pmd_t *pmd)
2239{
2240 phys_addr_t pt_base, paddr;
2241 unsigned pmdidx;
2242
2243 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2244
2245 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2246 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2247 paddr = m2p(pmd[pmdidx].pmd);
2248 pt_base = min(pt_base, paddr);
2249 }
2250
2251 return pt_base;
2252}
2253
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04002254void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002255{
2256 pmd_t *kernel_pmd;
2257
Juergen Gross70e61192015-07-17 06:51:35 +02002258 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2259
2260 xen_pt_base = xen_find_pt_base(kernel_pmd);
2261 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2262
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002263 initial_kernel_pmd =
2264 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002265
Juergen Gross70e61192015-07-17 06:51:35 +02002266 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002267
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002268 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002269
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002270 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002271
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002272 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002273 initial_page_table[KERNEL_PGD_BOUNDARY] =
2274 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002275
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002276 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2277 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002278 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2279
2280 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2281
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002282 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2283 PFN_DOWN(__pa(initial_page_table)));
2284 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002285
Juergen Gross04414ba2015-07-17 06:51:31 +02002286 memblock_reserve(xen_pt_base, xen_pt_size);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002287}
2288#endif /* CONFIG_X86_64 */
2289
Juergen Gross6c2681c2015-07-17 06:51:34 +02002290void __init xen_reserve_special_pages(void)
2291{
2292 phys_addr_t paddr;
2293
2294 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2295 if (xen_start_info->store_mfn) {
2296 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2297 memblock_reserve(paddr, PAGE_SIZE);
2298 }
2299 if (!xen_initial_domain()) {
2300 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2301 memblock_reserve(paddr, PAGE_SIZE);
2302 }
2303}
2304
Juergen Gross04414ba2015-07-17 06:51:31 +02002305void __init xen_pt_check_e820(void)
2306{
2307 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2308 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2309 BUG();
2310 }
2311}
2312
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002313static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2314
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002315static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002316{
2317 pte_t pte;
2318
2319 phys >>= PAGE_SHIFT;
2320
2321 switch (idx) {
2322 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002323 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002324#ifdef CONFIG_X86_32
2325 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002326# ifdef CONFIG_HIGHMEM
2327 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2328# endif
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002329#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002330 case VSYSCALL_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002331#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002332 case FIX_TEXT_POKE0:
2333 case FIX_TEXT_POKE1:
2334 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002335 pte = pfn_pte(phys, prot);
2336 break;
2337
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002338#ifdef CONFIG_X86_LOCAL_APIC
2339 case FIX_APIC_BASE: /* maps dummy local APIC */
2340 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2341 break;
2342#endif
2343
2344#ifdef CONFIG_X86_IO_APIC
2345 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2346 /*
2347 * We just don't map the IO APIC - all access is via
2348 * hypercalls. Keep the address in the pte for reference.
2349 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002350 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002351 break;
2352#endif
2353
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002354 case FIX_PARAVIRT_BOOTMAP:
2355 /* This is an MFN, but it isn't an IO mapping from the
2356 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002357 pte = mfn_pte(phys, prot);
2358 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002359
2360 default:
2361 /* By default, set_fixmap is used for hardware mappings */
David Vrabel7f2f8822014-01-08 14:01:01 +00002362 pte = mfn_pte(phys, prot);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002363 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002364 }
2365
2366 __native_set_fixmap(idx, pte);
2367
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002368#ifdef CONFIG_X86_VSYSCALL_EMULATION
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002369 /* Replicate changes to map the vsyscall page into the user
2370 pagetable vsyscall mapping. */
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002371 if (idx == VSYSCALL_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002372 unsigned long vaddr = __fix_to_virt(idx);
2373 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2374 }
2375#endif
2376}
2377
Daniel Kiper3f5089532011-05-12 17:19:53 -04002378static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002379{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002380 if (xen_feature(XENFEAT_auto_translated_physmap))
2381 return;
2382
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002383 pv_mmu_ops.set_pte = xen_set_pte;
2384 pv_mmu_ops.set_pmd = xen_set_pmd;
2385 pv_mmu_ops.set_pud = xen_set_pud;
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002386#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002387 pv_mmu_ops.set_pgd = xen_set_pgd;
2388#endif
2389
2390 /* This will work as long as patching hasn't happened yet
2391 (which it hasn't) */
2392 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2393 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2394 pv_mmu_ops.release_pte = xen_release_pte;
2395 pv_mmu_ops.release_pmd = xen_release_pmd;
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002396#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002397 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2398 pv_mmu_ops.release_pud = xen_release_pud;
2399#endif
David Vrabeld6b186c2016-05-17 15:54:50 +01002400 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002401
2402#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002403 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002404 SetPagePinned(virt_to_page(level3_user_vsyscall));
2405#endif
2406 xen_mark_init_mm_pinned();
2407}
2408
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002409static void xen_leave_lazy_mmu(void)
2410{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002411 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002412 xen_mc_flush();
2413 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002414 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002415}
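
/*
 * Usage sketch (hypothetical function, not part of this file): generic mm
 * code brackets batches of pagetable updates with the lazy MMU hooks that
 * are wired up in the .lazy_mode ops below (paravirt_enter_lazy_mmu and
 * xen_leave_lazy_mmu). While lazy mode is active, the paravirtualized pte
 * writes are queued as multicalls and flushed in one batch when the mode
 * is left.
 */
static void example_batched_pte_updates(pte_t *ptep, pte_t pte, int count)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < count; i++)
		set_pte(ptep + i, pte);	/* queued, not issued immediately */
	arch_leave_lazy_mmu_mode();	/* flushes the whole batch */
}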
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002416
Daniel Kiper3f5089532011-05-12 17:19:53 -04002417static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002418 .read_cr2 = xen_read_cr2,
2419 .write_cr2 = xen_write_cr2,
2420
2421 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002422 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002423
2424 .flush_tlb_user = xen_flush_tlb,
2425 .flush_tlb_kernel = xen_flush_tlb,
2426 .flush_tlb_single = xen_flush_tlb_single,
2427 .flush_tlb_others = xen_flush_tlb_others,
2428
2429 .pte_update = paravirt_nop,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002430
2431 .pgd_alloc = xen_pgd_alloc,
2432 .pgd_free = xen_pgd_free,
2433
2434 .alloc_pte = xen_alloc_pte_init,
2435 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002436 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002437 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002438
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002439 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002440 .set_pte_at = xen_set_pte_at,
2441 .set_pmd = xen_set_pmd_hyper,
2442
2443 .ptep_modify_prot_start = __ptep_modify_prot_start,
2444 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2445
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002446 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2447 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002448
David Vrabeld6b186c2016-05-17 15:54:50 +01002449 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002450 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002451
2452#ifdef CONFIG_X86_PAE
2453 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002454 .pte_clear = xen_pte_clear,
2455 .pmd_clear = xen_pmd_clear,
2456#endif /* CONFIG_X86_PAE */
2457 .set_pud = xen_set_pud_hyper,
2458
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002459 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2460 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002461
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002462#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002463 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2464 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002465 .set_pgd = xen_set_pgd_hyper,
2466
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002467 .alloc_pud = xen_alloc_pmd_init,
2468 .release_pud = xen_release_pmd_init,
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002469#endif /* CONFIG_PGTABLE_LEVELS == 4 */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002470
2471 .activate_mm = xen_activate_mm,
2472 .dup_mmap = xen_dup_mmap,
2473 .exit_mmap = xen_exit_mmap,
2474
2475 .lazy_mode = {
2476 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002477 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002478 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002479 },
2480
2481 .set_fixmap = xen_set_fixmap,
2482};
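/*
 * Rough sketch of how generic code reaches the xen_mmu_ops table above.
 * In the real kernel the dispatch goes through the paravirt patching
 * macros in arch/x86/include/asm/paravirt.h; the direct call through
 * the ops structure here is a simplification for illustration only.
 */
static inline void example_set_pte(pte_t *ptep, pte_t pteval)
{
	/* resolves to the Xen implementation once xen_init_mmu_ops() has run */
	pv_mmu_ops.set_pte(ptep, pteval);
}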
2483
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002484void __init xen_init_mmu_ops(void)
2485{
Attilio Rao7737b212012-08-21 21:22:38 +01002486 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002487
Boris Ostrovsky20f36e02015-12-12 19:25:55 -05002488 if (xen_feature(XENFEAT_auto_translated_physmap))
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002489 return;
Boris Ostrovsky20f36e02015-12-12 19:25:55 -05002490
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002491 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002492
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002493 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002494}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002495
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002496/* Protected by xen_reservation_lock. */
2497#define MAX_CONTIG_ORDER 9 /* 2MB */
2498static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2499
2500#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2501static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2502 unsigned long *in_frames,
2503 unsigned long *out_frames)
2504{
2505 int i;
2506 struct multicall_space mcs;
2507
2508 xen_mc_batch();
2509 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2510 mcs = __xen_mc_entry(0);
2511
2512 if (in_frames)
2513 in_frames[i] = virt_to_mfn(vaddr);
2514
2515 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002516 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002517
2518 if (out_frames)
2519 out_frames[i] = virt_to_pfn(vaddr);
2520 }
2521 xen_mc_issue(0);
2522}
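/*
 * Illustrative contrast, not from the original file: the unbatched
 * equivalent of the zap loop above.  One update_va_mapping hypercall
 * per page works, but costs a guest/hypervisor transition per PTE;
 * xen_zap_pfn_range() instead queues the updates as multicalls and
 * issues them with a single flush.
 */
static void example_zap_unbatched(unsigned long vaddr, unsigned int order)
{
	unsigned long i;

	for (i = 0; i < (1UL << order); i++, vaddr += PAGE_SIZE) {
		/* one hypercall per page - exactly what the batching avoids */
		HYPERVISOR_update_va_mapping(vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
	}
}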
2523
2524/*
2525 * Update the pfn-to-mfn mappings for a virtual address range, either to
2526 * point to an array of mfns, or contiguously from a single starting
2527 * mfn.
2528 */
2529static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2530 unsigned long *mfns,
2531 unsigned long first_mfn)
2532{
2533 unsigned i, limit;
2534 unsigned long mfn;
2535
2536 xen_mc_batch();
2537
2538 limit = 1u << order;
2539 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2540 struct multicall_space mcs;
2541 unsigned flags;
2542
2543 mcs = __xen_mc_entry(0);
2544 if (mfns)
2545 mfn = mfns[i];
2546 else
2547 mfn = first_mfn + i;
2548
2549 if (i < (limit - 1))
2550 flags = 0;
2551 else {
2552 if (order == 0)
2553 flags = UVMF_INVLPG | UVMF_ALL;
2554 else
2555 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2556 }
2557
2558 MULTI_update_va_mapping(mcs.mc, vaddr,
2559 mfn_pte(mfn, PAGE_KERNEL), flags);
2560
2561 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2562 }
2563
2564 xen_mc_issue(0);
2565}
2566
2567/*
2568 * Perform the hypercall to exchange a region of our pfns to point to
2569 * memory with the required contiguous alignment. Takes the pfns as
2570 * input, and populates mfns as output.
2571 *
2572 * Returns a success code indicating whether the hypervisor was able to
2573 * satisfy the request or not.
2574 */
2575static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2576 unsigned long *pfns_in,
2577 unsigned long extents_out,
2578 unsigned int order_out,
2579 unsigned long *mfns_out,
2580 unsigned int address_bits)
2581{
2582 long rc;
2583 int success;
2584
2585 struct xen_memory_exchange exchange = {
2586 .in = {
2587 .nr_extents = extents_in,
2588 .extent_order = order_in,
2589 .extent_start = pfns_in,
2590 .domid = DOMID_SELF
2591 },
2592 .out = {
2593 .nr_extents = extents_out,
2594 .extent_order = order_out,
2595 .extent_start = mfns_out,
2596 .address_bits = address_bits,
2597 .domid = DOMID_SELF
2598 }
2599 };
2600
2601 BUG_ON(extents_in << order_in != extents_out << order_out);
2602
2603 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2604 success = (exchange.nr_exchanged == extents_in);
2605
2606 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2607 BUG_ON(success && (rc != 0));
2608
2609 return success;
2610}
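/*
 * Worked example of the size invariant checked above
 * (extents_in << order_in == extents_out << order_out): trading 512
 * individual pages for a single 2MB extent keeps both sides at 512
 * pages.  Hypothetical wrapper, mirroring the call shape used by
 * xen_create_contiguous_region() below.
 */
static int example_exchange_for_2mb(unsigned long *pfns_in,
				    unsigned long *mfn_out)
{
	/* 512 << 0 == 1 << 9 == 512 pages == 2MB */
	return xen_exchange_memory(512, 0, pfns_in,
				   1, 9, mfn_out,
				   32 /* result must be addressable below 4GB */);
}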
2611
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002612int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002613 unsigned int address_bits,
2614 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002615{
2616 unsigned long *in_frames = discontig_frames, out_frame;
2617 unsigned long flags;
2618 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002619 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002620
2621 /*
2622 * Currently an auto-translated guest will not perform I/O, nor will
2623 * it require PAE page directories below 4GB. Therefore any calls to
2624 * this function are redundant and can be ignored.
2625 */
2626
2627 if (xen_feature(XENFEAT_auto_translated_physmap))
2628 return 0;
2629
2630 if (unlikely(order > MAX_CONTIG_ORDER))
2631 return -ENOMEM;
2632
2633 memset((void *) vstart, 0, PAGE_SIZE << order);
2634
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002635 spin_lock_irqsave(&xen_reservation_lock, flags);
2636
2637 /* 1. Zap current PTEs, remembering MFNs. */
2638 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2639
2640 /* 2. Get a new contiguous memory extent. */
2641 out_frame = virt_to_pfn(vstart);
2642 success = xen_exchange_memory(1UL << order, 0, in_frames,
2643 1, order, &out_frame,
2644 address_bits);
2645
2646 /* 3. Map the new extent in place of old pages. */
2647 if (success)
2648 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2649 else
2650 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2651
2652 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2653
Stefano Stabellini69908902013-10-09 16:56:32 +00002654 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002655 return success ? 0 : -ENOMEM;
2656}
2657EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2658
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002659void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002660{
2661 unsigned long *out_frames = discontig_frames, in_frame;
2662 unsigned long flags;
2663 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002664 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002665
2666 if (xen_feature(XENFEAT_auto_translated_physmap))
2667 return;
2668
2669 if (unlikely(order > MAX_CONTIG_ORDER))
2670 return;
2671
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002672 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002673 memset((void *) vstart, 0, PAGE_SIZE << order);
2674
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002675 spin_lock_irqsave(&xen_reservation_lock, flags);
2676
2677 /* 1. Find start MFN of contiguous extent. */
2678 in_frame = virt_to_mfn(vstart);
2679
2680 /* 2. Zap current PTEs. */
2681 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2682
2683 /* 3. Do the exchange for non-contiguous MFNs. */
2684 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2685 0, out_frames, 0);
2686
2687 /* 4. Map new pages in place of old pages. */
2688 if (success)
2689 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2690 else
2691 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2692
2693 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2694}
2695EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
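/*
 * Hedged usage sketch (the main in-tree caller of these helpers is the
 * Xen swiotlb code): make an already-allocated, guest-contiguous buffer
 * machine-contiguous and below 4GB so a device can DMA to it, then give
 * the machine frames back afterwards.  All names here are invented for
 * illustration.
 */
static int example_make_dma_buffer(void *buf, unsigned int order,
				   dma_addr_t *dma_handle)
{
	int rc;

	/* Exchange the buffer's frames for one contiguous extent below 4GB. */
	rc = xen_create_contiguous_region(virt_to_phys(buf), order,
					  32, dma_handle);
	if (rc)
		return rc;	/* -ENOMEM: hypervisor could not satisfy it */

	/* ... program the device with *dma_handle and perform the I/O ... */

	/* Scatter the frames again once the device is done with them. */
	xen_destroy_contiguous_region(virt_to_phys(buf), order);
	return 0;
}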
2696
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002697#ifdef CONFIG_XEN_PVHVM
Olaf Hering34b6f012012-10-01 21:18:01 +02002698#ifdef CONFIG_PROC_VMCORE
2699/*
2700 * This function is used in two contexts:
2701 * - the kdump kernel has to check whether a pfn of the crashed kernel
2702 *   was a ballooned page. vmcore uses this function to decide
2703 * whether to access a pfn of the crashed kernel.
2704 * - the kexec kernel has to check whether a pfn was ballooned by the
2705 * previous kernel. If the pfn is ballooned, handle it properly.
2706 * Returns 0 if the pfn is not backed by a RAM page; the caller may
2707 * handle the pfn specially in this case.
2708 */
2709static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2710{
2711 struct xen_hvm_get_mem_type a = {
2712 .domid = DOMID_SELF,
2713 .pfn = pfn,
2714 };
2715 int ram;
2716
2717 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2718 return -ENXIO;
2719
2720 switch (a.mem_type) {
2721 case HVMMEM_mmio_dm:
2722 ram = 0;
2723 break;
2724 case HVMMEM_ram_rw:
2725 case HVMMEM_ram_ro:
2726 default:
2727 ram = 1;
2728 break;
2729 }
2730
2731 return ram;
2732}
2733#endif
2734
Stefano Stabellini59151002010-06-17 14:22:52 +01002735static void xen_hvm_exit_mmap(struct mm_struct *mm)
2736{
2737 struct xen_hvm_pagetable_dying a;
2738 int rc;
2739
2740 a.domid = DOMID_SELF;
2741 a.gpa = __pa(mm->pgd);
2742 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2743 WARN_ON_ONCE(rc < 0);
2744}
2745
2746static int is_pagetable_dying_supported(void)
2747{
2748 struct xen_hvm_pagetable_dying a;
2749 int rc = 0;
2750
2751 a.domid = DOMID_SELF;
2752 a.gpa = 0x00;
2753 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2754 if (rc < 0) {
2755 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2756 return 0;
2757 }
2758 return 1;
2759}
2760
2761void __init xen_hvm_init_mmu_ops(void)
2762{
2763 if (is_pagetable_dying_supported())
2764 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
Olaf Hering34b6f012012-10-01 21:18:01 +02002765#ifdef CONFIG_PROC_VMCORE
2766 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2767#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002768}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002769#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002770
Ian Campbellde1ef202009-05-21 10:09:46 +01002771#define REMAP_BATCH_SIZE 16
2772
2773struct remap_data {
David Vrabel4e8c0c82015-03-11 14:49:57 +00002774 xen_pfn_t *mfn;
2775 bool contiguous;
Ian Campbellde1ef202009-05-21 10:09:46 +01002776 pgprot_t prot;
2777 struct mmu_update *mmu_update;
2778};
2779
2780static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2781 unsigned long addr, void *data)
2782{
2783 struct remap_data *rmd = data;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002784 pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
2785
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08002786 /* If we have a contiguous range, just update the mfn itself,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002787	   else advance the pointer to the next mfn. */
2788 if (rmd->contiguous)
2789 (*rmd->mfn)++;
2790 else
2791 rmd->mfn++;
Ian Campbellde1ef202009-05-21 10:09:46 +01002792
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002793 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002794 rmd->mmu_update->val = pte_val_ma(pte);
2795 rmd->mmu_update++;
2796
2797 return 0;
2798}
2799
Julien Gralla13d7202015-08-07 17:34:41 +01002800static int do_remap_gfn(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002801 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002802 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002803 int *err_ptr, pgprot_t prot,
2804 unsigned domid,
2805 struct page **pages)
Ian Campbellde1ef202009-05-21 10:09:46 +01002806{
David Vrabel4e8c0c82015-03-11 14:49:57 +00002807 int err = 0;
Ian Campbellde1ef202009-05-21 10:09:46 +01002808 struct remap_data rmd;
2809 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
Ian Campbellde1ef202009-05-21 10:09:46 +01002810 unsigned long range;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002811 int mapped = 0;
Ian Campbellde1ef202009-05-21 10:09:46 +01002812
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002813 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002814
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002815 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2816#ifdef CONFIG_XEN_PVH
2817 /* We need to update the local page tables and the xen HAP */
Julien Gralla13d7202015-08-07 17:34:41 +01002818 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002819 prot, domid, pages);
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002820#else
2821 return -EINVAL;
2822#endif
2823 }
2824
Julien Gralla13d7202015-08-07 17:34:41 +01002825 rmd.mfn = gfn;
Ian Campbellde1ef202009-05-21 10:09:46 +01002826 rmd.prot = prot;
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08002827	/* We use the err_ptr to indicate whether we are doing a contiguous
David Vrabel4e8c0c82015-03-11 14:49:57 +00002828	 * mapping or a discontiguous mapping. */
2829 rmd.contiguous = !err_ptr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002830
2831 while (nr) {
David Vrabel4e8c0c82015-03-11 14:49:57 +00002832 int index = 0;
2833 int done = 0;
2834 int batch = min(REMAP_BATCH_SIZE, nr);
2835 int batch_left = batch;
Ian Campbellde1ef202009-05-21 10:09:46 +01002836 range = (unsigned long)batch << PAGE_SHIFT;
2837
2838 rmd.mmu_update = mmu_update;
2839 err = apply_to_page_range(vma->vm_mm, addr, range,
2840 remap_area_mfn_pte_fn, &rmd);
2841 if (err)
2842 goto out;
2843
David Vrabel4e8c0c82015-03-11 14:49:57 +00002844		/* Record the error for each page that fails, but continue
2845		 * mapping until the whole set is done. */
2846 do {
2847 int i;
2848
2849 err = HYPERVISOR_mmu_update(&mmu_update[index],
2850 batch_left, &done, domid);
2851
2852 /*
Julien Gralla13d7202015-08-07 17:34:41 +01002853 * @err_ptr may be the same buffer as @gfn, so
2854 * only clear it after each chunk of @gfn is
David Vrabel4e8c0c82015-03-11 14:49:57 +00002855 * used.
2856 */
2857 if (err_ptr) {
2858 for (i = index; i < index + done; i++)
2859 err_ptr[i] = 0;
2860 }
2861 if (err < 0) {
2862 if (!err_ptr)
2863 goto out;
2864 err_ptr[i] = err;
2865 done++; /* Skip failed frame. */
2866 } else
2867 mapped += done;
2868 batch_left -= done;
2869 index += done;
2870 } while (batch_left);
Ian Campbellde1ef202009-05-21 10:09:46 +01002871
2872 nr -= batch;
2873 addr += range;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002874 if (err_ptr)
2875 err_ptr += batch;
David Vrabel914beb92015-10-28 13:39:05 +00002876 cond_resched();
Ian Campbellde1ef202009-05-21 10:09:46 +01002877 }
Ian Campbellde1ef202009-05-21 10:09:46 +01002878out:
2879
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04002880 xen_flush_tlb_all();
Ian Campbellde1ef202009-05-21 10:09:46 +01002881
David Vrabel4e8c0c82015-03-11 14:49:57 +00002882 return err < 0 ? err : mapped;
2883}
2884
Julien Gralla13d7202015-08-07 17:34:41 +01002885int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002886 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002887 xen_pfn_t gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002888 pgprot_t prot, unsigned domid,
2889 struct page **pages)
2890{
Julien Gralla13d7202015-08-07 17:34:41 +01002891 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
Ian Campbellde1ef202009-05-21 10:09:46 +01002892}
Julien Gralla13d7202015-08-07 17:34:41 +01002893EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
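/*
 * Hedged usage sketch: how a privcmd-style mmap handler might map a run
 * of foreign frames into a userspace VMA with the helper above.  The VMA
 * must already be VM_IO | VM_PFNMAP (see the BUG_ON in do_remap_gfn());
 * the gfn, domid and NULL pages argument are illustrative, with pages
 * only needed on the auto-translated path.
 */
static int example_mmap_foreign_range(struct vm_area_struct *vma,
				      xen_pfn_t first_gfn, int nr,
				      domid_t domid)
{
	return xen_remap_domain_gfn_range(vma, vma->vm_start, first_gfn, nr,
					  vma->vm_page_prot, domid, NULL);
}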
Ian Campbell9a032e32012-10-17 13:37:49 -07002894
Julien Gralla13d7202015-08-07 17:34:41 +01002895int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002896 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002897 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002898 int *err_ptr, pgprot_t prot,
2899 unsigned domid, struct page **pages)
2900{
2901	/* We BUG_ON because it is a programmer error to pass a NULL err_ptr:
2902	 * without per-frame error reporting, a later failure shows up only
2903	 * as "wrong memory was mapped in", which is hard to trace back to
2904	 * its actual cause. */
2905 BUG_ON(err_ptr == NULL);
Julien Gralla13d7202015-08-07 17:34:41 +01002906 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
David Vrabel4e8c0c82015-03-11 14:49:57 +00002907}
Julien Gralla13d7202015-08-07 17:34:41 +01002908EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
David Vrabel4e8c0c82015-03-11 14:49:57 +00002909
2910
Ian Campbell9a032e32012-10-17 13:37:49 -07002911/* Returns: 0 success */
Julien Gralla13d7202015-08-07 17:34:41 +01002912int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
Ian Campbell9a032e32012-10-17 13:37:49 -07002913 int numpgs, struct page **pages)
2914{
2915 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2916 return 0;
2917
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002918#ifdef CONFIG_XEN_PVH
David Vrabel628c28e2015-03-11 14:49:56 +00002919 return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002920#else
Ian Campbell9a032e32012-10-17 13:37:49 -07002921 return -EINVAL;
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002922#endif
Ian Campbell9a032e32012-10-17 13:37:49 -07002923}
Julien Gralla13d7202015-08-07 17:34:41 +01002924EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
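/*
 * Hedged sketch of the array variant: map a batch of possibly unrelated
 * foreign gfns, collecting a per-frame error code, then tear the
 * mapping down again.  err_ptr must not be NULL (see the BUG_ON above);
 * the caller and argument values are illustrative only.
 */
static int example_map_gfn_batch(struct vm_area_struct *vma, xen_pfn_t *gfns,
				 int *errs, struct page **pages, int nr,
				 domid_t domid)
{
	int mapped;

	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
					    errs, vma->vm_page_prot,
					    domid, pages);
	if (mapped < 0)
		return mapped;

	/* ... use the mapping; errs[i] != 0 marks frames that failed ... */

	return xen_unmap_domain_gfn_range(vma, nr, pages);
}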