Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001/*
2 * Xen mmu operations
3 *
4 * This file contains the various mmu fetch and update operations.
5 * The most important job they must perform is the mapping between the
6 * domain's pfns and the overall machine mfns.
7 *
8 * Xen allows guests to directly update the pagetable, in a controlled
9 * fashion. In other words, the guest modifies the same pagetable
10 * that the CPU actually uses, which eliminates the overhead of having
11 * a separate shadow pagetable.
12 *
13 * In order to allow this, it falls on the guest domain to map its
14 * notion of a "physical" pfn - which is just a domain-local linear
15 * address - into a real "machine address" which the CPU's MMU can
16 * use.
17 *
18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19 * inserted directly into the pagetable. When creating a new
20 * pte/pmd/pgd, the passed pfn is converted into an mfn. Conversely,
21 * when reading the content back with __(pgd|pmd|pte)_val, the mfn is
22 * converted back into a pfn.
23 *
24 * The other constraint is that all pages which make up a pagetable
25 * must be mapped read-only in the guest. This prevents uncontrolled
26 * guest updates to the pagetable. Xen strictly enforces this, and
27 * will disallow any pagetable update which will end up mapping a
28 * pagetable page RW, and will disallow using any writable page as a
29 * pagetable.
30 *
31 * Naively, when loading %cr3 with the base of a new pagetable, Xen
32 * would need to validate the whole pagetable before going on.
33 * Naturally, this is quite slow. The solution is to "pin" a
34 * pagetable, which enforces all the constraints on the pagetable even
35 * when it is not actively in use. This means that Xen can be assured
36 * that it is still valid when you do load it into %cr3, and doesn't
37 * need to revalidate it.
38 *
39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40 */
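/*
 * A minimal sketch of the pfn<->mfn conversion described above (the helper
 * name is hypothetical; the real code is pte_pfn_to_mfn() and
 * pte_mfn_to_pfn() further down in this file): split the pte value into a
 * frame number and its flag bits, translate the frame, and reassemble.
 *
 *	pteval_t sketch_pfn_to_mfn(pteval_t val)
 *	{
 *		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 *		pteval_t flags = val & PTE_FLAGS_MASK;
 *
 *		return ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
 *	}
 */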
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -070041#include <linux/sched.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070042#include <linux/highmem.h>
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070043#include <linux/debugfs.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070044#include <linux/bug.h>
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -070045#include <linux/vmalloc.h>
Randy Dunlap44408ad2009-05-12 13:31:40 -070046#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090047#include <linux/gfp.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -070048#include <linux/memblock.h>
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -050049#include <linux/seq_file.h>
Olaf Hering34b6f012012-10-01 21:18:01 +020050#include <linux/crash_dump.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070051
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -080052#include <trace/events/xen.h>
53
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070054#include <asm/pgtable.h>
55#include <asm/tlbflush.h>
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -070056#include <asm/fixmap.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070057#include <asm/mmu_context.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080058#include <asm/setup.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070059#include <asm/paravirt.h>
Alex Nixon7347b402010-02-19 13:31:06 -050060#include <asm/e820.h>
Jeremy Fitzhardingecbcd79c2008-07-08 15:06:27 -070061#include <asm/linkage.h>
Alex Nixon08bbc9d2009-02-09 12:05:46 -080062#include <asm/page.h>
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -070063#include <asm/init.h>
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -070064#include <asm/pat.h>
Andrew Jones900cba82009-12-18 10:31:31 +010065#include <asm/smp.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070066
67#include <asm/xen/hypercall.h>
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070068#include <asm/xen/hypervisor.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070069
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080070#include <xen/xen.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070071#include <xen/page.h>
72#include <xen/interface/xen.h>
Stefano Stabellini59151002010-06-17 14:22:52 +010073#include <xen/interface/hvm/hvm_op.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080074#include <xen/interface/version.h>
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -080075#include <xen/interface/memory.h>
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080076#include <xen/hvc-console.h>
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070077
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -070078#include "multicalls.h"
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -070079#include "mmu.h"
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -070080#include "debugfs.h"
81
Alex Nixon19001c82009-02-09 12:05:46 -080082/*
83 * Protects atomic reservation decrease/increase against concurrent increases.
Daniel Kiper06f521d2011-03-08 22:45:46 +010084 * Also protects non-atomic updates of current_pages and balloon lists.
Alex Nixon19001c82009-02-09 12:05:46 -080085 */
86DEFINE_SPINLOCK(xen_reservation_lock);
87
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040088#ifdef CONFIG_X86_32
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080089/*
90 * Identity map, in addition to plain kernel map. This needs to be
91 * large enough to provide the page table pages needed to map the rest.
92 * Each page can map 2MB.
93 */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -070094#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -040096#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -080097#ifdef CONFIG_X86_64
98/* l3 pud for userspace vsyscall mapping */
99static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100#endif /* CONFIG_X86_64 */
101
102/*
103 * Note about cr3 (pagetable base) values:
104 *
105 * xen_cr3 contains the current logical cr3 value; it contains the
106 * last set cr3. This may not be the current effective cr3, because
107 * its update may have been lazily deferred. However, a vcpu looking
108 * at its own cr3 can use this value knowing that everything will
109 * be self-consistent.
110 *
111 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
112 * hypercall to set the vcpu cr3 is complete (so it may be a little
113 * out of date, but it will never be set early). If one vcpu is
114 * looking at another vcpu's cr3 value, it should use this variable.
115 */
116DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
117DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
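/*
 * A minimal sketch of the rule above: code that inspects another vcpu's
 * pagetable base should compare against xen_current_cr3, not xen_cr3.
 * The helper name is hypothetical; xen_drop_mm_ref() below applies the
 * same test when deciding which cpus still reference a pagetable.
 *
 *	static bool sketch_cpu_uses_pgd(int cpu, pgd_t *pgd)
 *	{
 *		return per_cpu(xen_current_cr3, cpu) == __pa(pgd);
 *	}
 */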
118
Juergen Gross04414ba2015-07-17 06:51:31 +0200119static phys_addr_t xen_pt_base, xen_pt_size __initdata;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -0800120
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700121/*
122 * Just beyond the highest usermode address. STACK_TOP_MAX has a
123 * redzone above it, so round it up to a PGD boundary.
124 */
125#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
126
Jeremy Fitzhardinge9976b392009-02-27 09:19:26 -0800127unsigned long arbitrary_virt_to_mfn(void *vaddr)
128{
129 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
130
131 return PFN_DOWN(maddr.maddr);
132}
133
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700134xmaddr_t arbitrary_virt_to_machine(void *vaddr)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700135{
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700136 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100137 unsigned int level;
Chris Lalancette9f32d212008-10-23 17:40:25 -0700138 pte_t *pte;
139 unsigned offset;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700140
Chris Lalancette9f32d212008-10-23 17:40:25 -0700141 /*
142 * if the vaddr is in the linearly mapped range, we can just use
143 * the (quick) virt_to_machine() p2m lookup
144 */
145 if (virt_addr_valid(vaddr))
146 return virt_to_machine(vaddr);
147
148 /* otherwise we have to do a (slower) full page-table walk */
149
150 pte = lookup_address(address, &level);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700151 BUG_ON(pte == NULL);
Chris Lalancette9f32d212008-10-23 17:40:25 -0700152 offset = address & ~PAGE_MASK;
Jeremy Fitzhardingeebd879e2008-07-08 15:06:54 -0700153 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700154}
Stephen Rothwellde23be52011-01-15 10:36:26 +1100155EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
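/*
 * Typical use of the helper above: hypercall arguments want machine
 * addresses, so callers that may hold an ioremapped (non-linear) pointer
 * go through it, e.g. (as xen_set_pmd_hyper()/xen_set_pud_hyper() below
 * do during 64-bit pagetable setup):
 *
 *	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 *	u.val = pmd_val_ma(val);
 *	xen_extend_mmu_update(&u);
 */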
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700156
157void make_lowmem_page_readonly(void *vaddr)
158{
159 pte_t *pte, ptev;
160 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100161 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700162
Ingo Molnarf0646e42008-01-30 13:33:43 +0100163 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700164 if (pte == NULL)
165 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700166
167 ptev = pte_wrprotect(*pte);
168
169 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
170 BUG();
171}
172
173void make_lowmem_page_readwrite(void *vaddr)
174{
175 pte_t *pte, ptev;
176 unsigned long address = (unsigned long)vaddr;
Harvey Harrisonda7bfc52008-02-09 23:24:08 +0100177 unsigned int level;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700178
Ingo Molnarf0646e42008-01-30 13:33:43 +0100179 pte = lookup_address(address, &level);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -0700180 if (pte == NULL)
181 return; /* vaddr missing */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700182
183 ptev = pte_mkwrite(*pte);
184
185 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
186 BUG();
187}
188
189
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700190static bool xen_page_pinned(void *ptr)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100191{
192 struct page *page = virt_to_page(ptr);
193
194 return PagePinned(page);
195}
196
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800197void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800198{
199 struct multicall_space mcs;
200 struct mmu_update *u;
201
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800202 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
203
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800204 mcs = xen_mc_entry(sizeof(*u));
205 u = mcs.args;
206
207 /* ptep might be kmapped when using 32-bit HIGHPTE */
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800208 u->ptr = virt_to_machine(ptep).maddr;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800209 u->val = pte_val_ma(pteval);
210
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800211 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800212
213 xen_mc_issue(PARAVIRT_LAZY_MMU);
214}
Jeremy Fitzhardingeeba3ff82009-02-09 12:05:49 -0800215EXPORT_SYMBOL_GPL(xen_set_domain_pte);
216
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700217static void xen_extend_mmu_update(const struct mmu_update *update)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700218{
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700219 struct multicall_space mcs;
220 struct mmu_update *u;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700221
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700222 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
223
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700224 if (mcs.mc != NULL) {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700225 mcs.mc->args[1]++;
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -0700226 } else {
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700227 mcs = __xen_mc_entry(sizeof(*u));
228 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
229 }
230
231 u = mcs.args;
232 *u = *update;
233}
234
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800235static void xen_extend_mmuext_op(const struct mmuext_op *op)
236{
237 struct multicall_space mcs;
238 struct mmuext_op *u;
239
240 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
241
242 if (mcs.mc != NULL) {
243 mcs.mc->args[1]++;
244 } else {
245 mcs = __xen_mc_entry(sizeof(*u));
246 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
247 }
248
249 u = mcs.args;
250 *u = *op;
251}
252
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800253static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700254{
255 struct mmu_update u;
256
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700257 preempt_disable();
258
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700259 xen_mc_batch();
260
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700261 /* ptr may be ioremapped for 64-bit pagetable setup */
262 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700263 u.val = pmd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700264 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700265
266 xen_mc_issue(PARAVIRT_LAZY_MMU);
267
268 preempt_enable();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700269}
270
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800271static void xen_set_pmd(pmd_t *ptr, pmd_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100272{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800273 trace_xen_mmu_set_pmd(ptr, val);
274
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100275 /* If page is not pinned, we can just update the entry
276 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700277 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100278 *ptr = val;
279 return;
280 }
281
282 xen_set_pmd_hyper(ptr, val);
283}
284
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700285/*
286 * Associate a virtual page frame with a given physical page frame
287 * and protection flags for that frame.
288 */
289void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
290{
Jeremy Fitzhardinge836fe2f2008-07-08 15:06:58 -0700291 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700292}
293
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800294static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
295{
296 struct mmu_update u;
297
298 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
299 return false;
300
301 xen_mc_batch();
302
303 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
304 u.val = pte_val_ma(pteval);
305 xen_extend_mmu_update(&u);
306
307 xen_mc_issue(PARAVIRT_LAZY_MMU);
308
309 return true;
310}
311
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800312static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800313{
David Vrabeld095d432012-07-09 11:39:05 +0100314 if (!xen_batched_set_pte(ptep, pteval)) {
315 /*
316 * Could call native_set_pte() here and trap and
317 * emulate the PTE write but with 32-bit guests this
318 * needs two traps (one for each of the two 32-bit
319 * words in the PTE) so do one hypercall directly
320 * instead.
321 */
322 struct mmu_update u;
323
324 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
325 u.val = pte_val_ma(pteval);
326 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
327 }
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800328}
329
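/*
 * For contrast with the comment above: the trap-and-emulate alternative
 * would be a plain native write, which Xen would intercept via the fault
 * on the read-only pagetable page (two faults for the two 32-bit words
 * of a PAE pte):
 *
 *	native_set_pte(ptep, pteval);
 *
 * The single explicit mmu_update hypercall used here avoids that.
 */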
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800330static void xen_set_pte(pte_t *ptep, pte_t pteval)
331{
332 trace_xen_mmu_set_pte(ptep, pteval);
333 __xen_set_pte(ptep, pteval);
334}
335
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800336static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700337 pte_t *ptep, pte_t pteval)
338{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800339 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
340 __xen_set_pte(ptep, pteval);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700341}
342
Tejf63c2f22008-12-16 11:56:06 -0800343pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
344 unsigned long addr, pte_t *ptep)
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700345{
346 /* Just return the pte as-is. We preserve the bits on commit */
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800347 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700348 return *ptep;
349}
350
351void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
352 pte_t *ptep, pte_t pte)
353{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700354 struct mmu_update u;
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700355
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800356 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700357 xen_mc_batch();
358
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -0800359 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700360 u.val = pte_val_ma(pte);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700361 xen_extend_mmu_update(&u);
Jeremy Fitzhardingee57778a2008-06-16 04:30:02 -0700362
363 xen_mc_issue(PARAVIRT_LAZY_MMU);
364}
365
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700366/* Assume pteval_t is equivalent to all the other *val_t types. */
367static pteval_t pte_mfn_to_pfn(pteval_t val)
368{
David Vrabel5926f872014-03-25 10:38:37 +0000369 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700370 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400371 unsigned long pfn = mfn_to_pfn(mfn);
372
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700373 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkb7e5ffe2012-05-03 16:14:14 -0400374 if (unlikely(pfn == ~0))
375 val = flags & ~_PAGE_PRESENT;
376 else
377 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700378 }
379
380 return val;
381}
382
383static pteval_t pte_pfn_to_mfn(pteval_t val)
384{
David Vrabel5926f872014-03-25 10:38:37 +0000385 if (val & _PAGE_PRESENT) {
Jeremy Fitzhardinge59438c92008-07-21 22:59:42 -0700386 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
Jeremy Fitzhardinge77be1fa2008-07-21 22:59:56 -0700387 pteval_t flags = val & PTE_FLAGS_MASK;
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500388 unsigned long mfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700389
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500390 if (!xen_feature(XENFEAT_auto_translated_physmap))
Juergen Gross0aad5682014-11-28 11:53:57 +0100391 mfn = __pfn_to_mfn(pfn);
Konrad Rzeszutek Wilkfb389232011-01-05 15:46:31 -0500392 else
393 mfn = pfn;
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700394 /*
395 * If there's no mfn for the pfn, then just create an
396 * empty non-present pte. Unfortunately this loses
397 * information about the original pfn, so
398 * pte_mfn_to_pfn is asymmetric.
399 */
400 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
401 mfn = 0;
402 flags = 0;
David Vrabel7f2f8822014-01-08 14:01:01 +0000403 } else
404 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
Jeremy Fitzhardingecfd89512010-08-31 14:06:22 -0700405 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700406 }
407
408 return val;
409}
410
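/*
 * The asymmetry noted above, in concrete (illustrative) terms: a pfn with
 * no backing mfn round-trips to an empty pte, so the original pfn cannot
 * be recovered afterwards.
 *
 *	pteval_t val = ((pteval_t)pfn << PAGE_SHIFT) | _PAGE_PRESENT;
 *
 *	val = pte_pfn_to_mfn(val);   becomes 0 if the pfn has no mfn
 *	val = pte_mfn_to_pfn(val);   stays 0; the pfn information is lost
 */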
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700411__visible pteval_t xen_pte_val(pte_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700412{
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700413 pteval_t pteval = pte.pte;
Juergen Gross47591df2014-11-03 14:02:04 +0100414
Jeremy Fitzhardinge41f2e472010-03-30 11:47:40 -0700415 return pte_mfn_to_pfn(pteval);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700416}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800417PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700418
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700419__visible pgdval_t xen_pgd_val(pgd_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700420{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700421 return pte_mfn_to_pfn(pgd.pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700422}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800423PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700424
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700425__visible pte_t xen_make_pte(pteval_t pte)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700426{
David Vrabel7f2f8822014-01-08 14:01:01 +0000427 pte = pte_pfn_to_mfn(pte);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -0800428
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700429 return native_make_pte(pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700430}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800431PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700432
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700433__visible pgd_t xen_make_pgd(pgdval_t pgd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700434{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700435 pgd = pte_pfn_to_mfn(pgd);
436 return native_make_pgd(pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700437}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800438PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700439
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700440__visible pmdval_t xen_pmd_val(pmd_t pmd)
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700441{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700442 return pte_mfn_to_pfn(pmd.pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700443}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800444PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100445
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800446static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700447{
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700448 struct mmu_update u;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700449
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700450 preempt_disable();
451
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700452 xen_mc_batch();
453
Jeremy Fitzhardingece803e72008-07-08 15:06:55 -0700454 /* ptr may be ioremapped for 64-bit pagetable setup */
455 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
Jeremy Fitzhardinge400d3492008-06-16 04:30:03 -0700456 u.val = pud_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700457 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged66bf8f2007-07-17 18:37:06 -0700458
459 xen_mc_issue(PARAVIRT_LAZY_MMU);
460
461 preempt_enable();
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700462}
463
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800464static void xen_set_pud(pud_t *ptr, pud_t val)
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100465{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800466 trace_xen_mmu_set_pud(ptr, val);
467
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100468 /* If page is not pinned, we can just update the entry
469 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700470 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100471 *ptr = val;
472 return;
473 }
474
475 xen_set_pud_hyper(ptr, val);
476}
477
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700478#ifdef CONFIG_X86_PAE
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800479static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700480{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800481 trace_xen_mmu_set_pte_atomic(ptep, pte);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700482 set_64bit((u64 *)ptep, native_pte_val(pte));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700483}
484
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800485static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700486{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800487 trace_xen_mmu_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge4a35c132010-12-01 15:30:41 -0800488 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
489 native_pte_clear(mm, addr, ptep);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700490}
491
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800492static void xen_pmd_clear(pmd_t *pmdp)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700493{
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800494 trace_xen_mmu_pmd_clear(pmdp);
Jeremy Fitzhardingee2426cf2008-05-31 01:24:27 +0100495 set_pmd(pmdp, __pmd(0));
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700496}
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700497#endif /* CONFIG_X86_PAE */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700498
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700499__visible pmd_t xen_make_pmd(pmdval_t pmd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700500{
Jeremy Fitzhardingeebb9cfe2008-06-16 15:01:56 -0700501 pmd = pte_pfn_to_mfn(pmd);
Jeremy Fitzhardinge947a69c2008-03-17 16:37:09 -0700502 return native_make_pmd(pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700503}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800504PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700505
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700506#if CONFIG_PGTABLE_LEVELS == 4
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700507__visible pudval_t xen_pud_val(pud_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700508{
509 return pte_mfn_to_pfn(pud.pud);
510}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800511PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700512
Andi Kleena2e7f0e2013-10-22 09:07:56 -0700513__visible pud_t xen_make_pud(pudval_t pud)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700514{
515 pud = pte_pfn_to_mfn(pud);
516
517 return native_make_pud(pud);
518}
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800519PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700520
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800521static pgd_t *xen_get_user_pgd(pgd_t *pgd)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700522{
523 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
524 unsigned offset = pgd - pgd_page;
525 pgd_t *user_ptr = NULL;
526
527 if (offset < pgd_index(USER_LIMIT)) {
528 struct page *page = virt_to_page(pgd_page);
529 user_ptr = (pgd_t *)page->private;
530 if (user_ptr)
531 user_ptr += offset;
532 }
533
534 return user_ptr;
535}
536
537static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700538{
539 struct mmu_update u;
540
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700541 u.ptr = virt_to_machine(ptr).maddr;
542 u.val = pgd_val_ma(val);
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700543 xen_extend_mmu_update(&u);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700544}
545
546/*
547 * Raw hypercall-based set_pgd, intended for use in early boot before
548 * there's a page structure. This implies:
549 * 1. The only existing pagetable is the kernel's
550 * 2. It is always pinned
551 * 3. It has no user pagetable attached to it
552 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800553static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700554{
555 preempt_disable();
556
557 xen_mc_batch();
558
559 __xen_set_pgd_hyper(ptr, val);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700560
561 xen_mc_issue(PARAVIRT_LAZY_MMU);
562
563 preempt_enable();
564}
565
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800566static void xen_set_pgd(pgd_t *ptr, pgd_t val)
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700567{
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700568 pgd_t *user_ptr = xen_get_user_pgd(ptr);
569
Jeremy Fitzhardinge84708802010-12-16 17:02:35 -0800570 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
571
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700572 /* If page is not pinned, we can just update the entry
573 directly */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700574 if (!xen_page_pinned(ptr)) {
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700575 *ptr = val;
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700576 if (user_ptr) {
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700577 WARN_ON(xen_page_pinned(user_ptr));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700578 *user_ptr = val;
579 }
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700580 return;
581 }
582
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700583 /* If it's pinned, then we can at least batch the kernel and
584 user updates together. */
585 xen_mc_batch();
586
587 __xen_set_pgd_hyper(ptr, val);
588 if (user_ptr)
589 __xen_set_pgd_hyper(user_ptr, val);
590
591 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700592}
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700593#endif /* CONFIG_PGTABLE_LEVELS == 4 */
Jeremy Fitzhardingef6e58732008-07-08 15:06:38 -0700594
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700595/*
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700596 * (Yet another) pagetable walker. This one is intended for pinning a
597 * pagetable. This means that it walks a pagetable and calls the
598 * callback function on each page it finds making up the page table,
599 * at every level. It walks the entire pagetable, but it only bothers
600 * pinning pte pages which are below the limit. In the normal case this
601 * will be STACK_TOP_MAX, but at boot we need to pin up to
602 * FIXADDR_TOP.
603 *
604 * For 32-bit the important bit is that we don't pin beyond there,
605 * because then we start getting into Xen's ptes.
606 *
607 * For 64-bit, we must skip the Xen hole in the middle of the address
608 * space, just after the big x86-64 virtual hole.
609 */
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000610static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
611 int (*func)(struct mm_struct *mm, struct page *,
612 enum pt_level),
613 unsigned long limit)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700614{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700615 int flush = 0;
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700616 unsigned hole_low, hole_high;
617 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
618 unsigned pgdidx, pudidx, pmdidx;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700619
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700620 /* The limit is the last byte to be touched */
621 limit--;
622 BUG_ON(limit >= FIXADDR_TOP);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700623
624 if (xen_feature(XENFEAT_auto_translated_physmap))
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700625 return 0;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700626
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700627 /*
628 * 64-bit has a great big hole in the middle of the address
629 * space, which contains the Xen mappings. On 32-bit these
630 * will end up making a zero-sized hole and so is a no-op.
631 */
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700632 hole_low = pgd_index(USER_LIMIT);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700633 hole_high = pgd_index(PAGE_OFFSET);
634
635 pgdidx_limit = pgd_index(limit);
636#if PTRS_PER_PUD > 1
637 pudidx_limit = pud_index(limit);
638#else
639 pudidx_limit = 0;
640#endif
641#if PTRS_PER_PMD > 1
642 pmdidx_limit = pmd_index(limit);
643#else
644 pmdidx_limit = 0;
645#endif
646
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700647 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700648 pud_t *pud;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700649
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700650 if (pgdidx >= hole_low && pgdidx < hole_high)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700651 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700652
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700653 if (!pgd_val(pgd[pgdidx]))
654 continue;
655
656 pud = pud_offset(&pgd[pgdidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700657
658 if (PTRS_PER_PUD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700659 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700660
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700661 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700662 pmd_t *pmd;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700663
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700664 if (pgdidx == pgdidx_limit &&
665 pudidx > pudidx_limit)
666 goto out;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700667
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700668 if (pud_none(pud[pudidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700669 continue;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700670
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700671 pmd = pmd_offset(&pud[pudidx], 0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700672
673 if (PTRS_PER_PMD > 1) /* not folded */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700674 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700675
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700676 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
677 struct page *pte;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700678
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700679 if (pgdidx == pgdidx_limit &&
680 pudidx == pudidx_limit &&
681 pmdidx > pmdidx_limit)
682 goto out;
683
684 if (pmd_none(pmd[pmdidx]))
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700685 continue;
686
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700687 pte = pmd_page(pmd[pmdidx]);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700688 flush |= (*func)(mm, pte, PT_PTE);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700689 }
690 }
691 }
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700692
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700693out:
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700694 /* Do the top level last, so that the callbacks can use it as
695 a cue to do final things like tlb flushes. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700696 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700697
698 return flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700699}
700
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000701static int xen_pgd_walk(struct mm_struct *mm,
702 int (*func)(struct mm_struct *mm, struct page *,
703 enum pt_level),
704 unsigned long limit)
705{
706 return __xen_pgd_walk(mm, mm->pgd, func, limit);
707}
708
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700709/* If we're using split pte locks, then take the page's lock and
710 return a pointer to it. Otherwise return NULL. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700711static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700712{
713 spinlock_t *ptl = NULL;
714
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -0800715#if USE_SPLIT_PTE_PTLOCKS
Kirill A. Shutemov49076ec2013-11-14 14:31:51 -0800716 ptl = ptlock_ptr(page);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700717 spin_lock_nest_lock(ptl, &mm->page_table_lock);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700718#endif
719
720 return ptl;
721}
722
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700723static void xen_pte_unlock(void *v)
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700724{
725 spinlock_t *ptl = v;
726 spin_unlock(ptl);
727}
728
729static void xen_do_pin(unsigned level, unsigned long pfn)
730{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800731 struct mmuext_op op;
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700732
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -0800733 op.cmd = level;
734 op.arg1.mfn = pfn_to_mfn(pfn);
735
736 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700737}
738
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700739static int xen_pin_page(struct mm_struct *mm, struct page *page,
740 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700741{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700742 unsigned pgfl = TestSetPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700743 int flush;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700744
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700745 if (pgfl)
746 flush = 0; /* already pinned */
747 else if (PageHighMem(page))
748 /* kmaps need flushing if we found an unpinned
749 highpage */
750 flush = 1;
751 else {
752 void *pt = lowmem_page_address(page);
753 unsigned long pfn = page_to_pfn(page);
754 struct multicall_space mcs = __xen_mc_entry(0);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700755 spinlock_t *ptl;
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700756
757 flush = 0;
758
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700759 /*
760 * We need to hold the pagetable lock between the time
761 * we make the pagetable RO and when we actually pin
762 * it. If we don't, then other users may come in and
763 * attempt to update the pagetable by writing it,
764 * which will fail because the memory is RO but not
765 * pinned, so Xen won't do the trap'n'emulate.
766 *
767 * If we're using split pte locks, we can't hold the
768 * entire pagetable's worth of locks during the
769 * traverse, because we may wrap the preempt count (8
770 * bits). The solution is to mark RO and pin each PTE
771 * page while holding the lock. This means the number
772 * of locks we end up holding is never more than a
773 * batch size (~32 entries, at present).
774 *
775 * If we're not using split pte locks, we needn't pin
776 * the PTE pages independently, because we're
777 * protected by the overall pagetable lock.
778 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700779 ptl = NULL;
780 if (level == PT_PTE)
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700781 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700782
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700783 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
784 pfn_pte(pfn, PAGE_KERNEL_RO),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700785 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
786
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700787 if (ptl) {
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700788 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
789
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700790 /* Queue a deferred unlock for when this batch
791 is completed. */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700792 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700793 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700794 }
795
796 return flush;
797}
798
799/* This is called just after a mm has been created, but it has not
800 been used yet. We need to make sure that its pagetable is all
801 read-only, and can be pinned. */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700802static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700803{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800804 trace_xen_mmu_pgd_pin(mm, pgd);
805
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700806 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700807
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000808 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100809 /* re-enable interrupts for flushing */
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700810 xen_mc_issue(0);
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100811
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700812 kmap_flush_unused();
Jeremy Fitzhardinged05fdf32008-10-28 19:23:06 +1100813
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -0700814 xen_mc_batch();
815 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700816
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700817#ifdef CONFIG_X86_64
818 {
819 pgd_t *user_pgd = xen_get_user_pgd(pgd);
820
821 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
822
823 if (user_pgd) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700824 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
Tejf63c2f22008-12-16 11:56:06 -0800825 xen_do_pin(MMUEXT_PIN_L4_TABLE,
826 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700827 }
828 }
829#else /* CONFIG_X86_32 */
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700830#ifdef CONFIG_X86_PAE
831 /* Need to make sure unshared kernel PMD is pinnable */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800832 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700833 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700834#endif
Jeremy Fitzhardinge28499142008-05-09 12:05:57 +0100835 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700836#endif /* CONFIG_X86_64 */
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700837 xen_mc_issue(0);
838}
839
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700840static void xen_pgd_pin(struct mm_struct *mm)
841{
842 __xen_pgd_pin(mm, mm->pgd);
843}
844
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100845/*
846 * On save, we need to pin all pagetables to make sure they get their
847 * mfns turned into pfns. Search the list for any unpinned pgds and pin
848 * them (unpinned pgds are not currently in use, probably because the
849 * process is under construction or destruction).
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700850 *
851 * Expected to be called in stop_machine() ("equivalent to taking
852 * every spinlock in the system"), so the locking doesn't really
853 * matter all that much.
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100854 */
855void xen_mm_pin_all(void)
856{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100857 struct page *page;
858
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800859 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100860
861 list_for_each_entry(page, &pgd_list, lru) {
862 if (!PagePinned(page)) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700863 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100864 SetPageSavePinned(page);
865 }
866 }
867
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800868 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100869}
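/*
 * A hypothetical caller, sketching how this pairs with xen_mm_unpin_all()
 * below around a save/restore cycle (the real callers sit in the Xen
 * suspend path and run under stop_machine(), as noted above):
 *
 *	xen_mm_pin_all();
 *	do_the_save();			hypothetical save/migrate step
 *	xen_mm_unpin_all();
 */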
870
Eduardo Habkostc1f2f092008-07-08 15:06:24 -0700871/*
872 * The init_mm pagetable is really pinned as soon as it's created, but
873 * that's before we have page structures to store the bits. So do all
874 * the book-keeping now.
875 */
Daniel Kiper3f5089532011-05-12 17:19:53 -0400876static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700877 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700878{
879 SetPagePinned(page);
880 return 0;
881}
882
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -0700883static void __init xen_mark_init_mm_pinned(void)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700884{
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700885 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700886}
887
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700888static int xen_unpin_page(struct mm_struct *mm, struct page *page,
889 enum pt_level level)
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700890{
Christoph Lameterd60cd462008-04-28 02:12:51 -0700891 unsigned pgfl = TestClearPagePinned(page);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700892
893 if (pgfl && !PageHighMem(page)) {
894 void *pt = lowmem_page_address(page);
895 unsigned long pfn = page_to_pfn(page);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700896 spinlock_t *ptl = NULL;
897 struct multicall_space mcs;
898
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700899 /*
900 * Do the converse to pin_page. If we're using split
901 * pte locks, we must be holding the lock while
902 * the pte page is unpinned but still RO to prevent
903 * concurrent updates from seeing it in this
904 * partially-pinned state.
905 */
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700906 if (level == PT_PTE) {
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700907 ptl = xen_pte_lock(page, mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700908
Jeremy Fitzhardinge11ad93e2008-08-19 13:32:51 -0700909 if (ptl)
910 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700911 }
912
913 mcs = __xen_mc_entry(0);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700914
915 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
916 pfn_pte(pfn, PAGE_KERNEL),
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700917 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
918
919 if (ptl) {
920 /* unlock when batch completed */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -0700921 xen_mc_callback(xen_pte_unlock, ptl);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700922 }
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700923 }
924
925 return 0; /* never need to flush on unpin */
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700926}
927
928/* Release a pagetable's pages back as normal RW */
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700929static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700930{
Jeremy Fitzhardinge5f94fb52010-12-17 15:31:23 -0800931 trace_xen_mmu_pgd_unpin(mm, pgd);
932
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700933 xen_mc_batch();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700934
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -0700935 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700936
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700937#ifdef CONFIG_X86_64
938 {
939 pgd_t *user_pgd = xen_get_user_pgd(pgd);
940
941 if (user_pgd) {
Tejf63c2f22008-12-16 11:56:06 -0800942 xen_do_pin(MMUEXT_UNPIN_TABLE,
943 PFN_DOWN(__pa(user_pgd)));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700944 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700945 }
946 }
947#endif
948
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700949#ifdef CONFIG_X86_PAE
950 /* Need to make sure unshared kernel PMD is unpinned */
Jeremy Fitzhardinge47cb2ed2008-11-06 13:48:24 -0800951 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700952 PT_PMD);
Jeremy Fitzhardinge5deb30d2008-07-08 15:07:06 -0700953#endif
Jeremy Fitzhardinged6182fb2008-07-08 15:07:13 -0700954
Ian Campbell86bbc2c2008-11-21 10:21:33 +0000955 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700956
957 xen_mc_issue(0);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700958}
959
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700960static void xen_pgd_unpin(struct mm_struct *mm)
961{
962 __xen_pgd_unpin(mm, mm->pgd);
963}
964
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100965/*
966 * On resume, undo any pinning done at save, so that the rest of the
967 * kernel doesn't see any unexpected pinned pagetables.
968 */
969void xen_mm_unpin_all(void)
970{
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100971 struct page *page;
972
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800973 spin_lock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100974
975 list_for_each_entry(page, &pgd_list, lru) {
976 if (PageSavePinned(page)) {
977 BUG_ON(!PagePinned(page));
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700978 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100979 ClearPageSavePinned(page);
980 }
981 }
982
Andrea Arcangelia79e53d2011-02-16 15:45:22 -0800983 spin_unlock(&pgd_lock);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +0100984}
985
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800986static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700987{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700988 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700989 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700990 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700991}
992
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -0800993static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700994{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700995 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -0700996 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -0700997 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -0700998}
999
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001000
1001#ifdef CONFIG_SMP
1002/* Another cpu may still have their %cr3 pointing at the pagetable, so
1003 we need to repoint it somewhere else before we can unpin it. */
1004static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001005{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001006 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001007 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001008
Alex Shi2113f462012-01-13 23:53:35 +08001009 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001010
Alex Shi2113f462012-01-13 23:53:35 +08001011 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001012 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001013
1014 /* If this cpu still has a stale cr3 reference, then make sure
1015 it has been flushed. */
Alex Shi2113f462012-01-13 23:53:35 +08001016 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001017 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001018}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001019
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001020static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001021{
Mike Travise4d98202008-12-16 17:34:05 -08001022 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001023 unsigned cpu;
1024
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001025 if (current->active_mm == mm) {
1026 if (current->mm == mm)
1027 load_cr3(swapper_pg_dir);
1028 else
1029 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001030 }
1031
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001032 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001033 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1034 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001035 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001036 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1037 continue;
1038 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1039 }
1040 return;
1041 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001042 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001043
1044 /* It's possible that a vcpu may have a stale reference to our
1045 cr3, because it's in lazy mode, and it hasn't yet flushed
1046 its set of pending hypercalls. In this case, we can
1047 look at its actual current cr3 value, and force it to flush
1048 if needed. */
1049 for_each_online_cpu(cpu) {
1050 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001051 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001052 }
1053
Mike Travise4d98202008-12-16 17:34:05 -08001054 if (!cpumask_empty(mask))
1055 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1056 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001057}
1058#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001059static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001060{
1061 if (current->active_mm == mm)
1062 load_cr3(swapper_pg_dir);
1063}
1064#endif
1065
1066/*
1067 * While a process runs, Xen pins its pagetables, which means that the
1068 * hypervisor forces them to be read-only and controls all updates
1069 * to them. This means that all pagetable updates have to go via the
1070 * hypervisor, which is moderately expensive.
1071 *
1072 * Since we're pulling the pagetable down, we switch to use init_mm,
1073 * unpin the old process pagetable and mark it all read-write, which
1074 * allows further operations on it to be simple memory accesses.
1075 *
1076 * The only subtle point is that another CPU may still be using the
1077 * pagetable because of lazy tlb flushing. This means we need to
1078 * switch all CPUs off this pagetable before we can unpin it.
1079 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001080static void xen_exit_mmap(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001081{
1082 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001083 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001084 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001085
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001086 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001087
1088 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001089 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001090 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001091
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001092 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001093}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001094
Attilio Raoc7112882012-08-21 21:22:40 +01001095static void xen_post_allocator_init(void);
1096
Juergen Gross70e61192015-07-17 06:51:35 +02001097static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1098{
1099 struct mmuext_op op;
1100
1101 op.cmd = cmd;
1102 op.arg1.mfn = pfn_to_mfn(pfn);
1103 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1104 BUG();
1105}
1106
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001107#ifdef CONFIG_X86_64
1108static void __init xen_cleanhighmap(unsigned long vaddr,
1109 unsigned long vaddr_end)
1110{
1111 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1112 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1113
1114 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1115 * We include the PMD passed in on _both_ boundaries. */
Juergen Gross1cf38742016-06-23 07:12:27 +02001116 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001117 pmd++, vaddr += PMD_SIZE) {
1118 if (pmd_none(*pmd))
1119 continue;
1120 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1121 set_pmd(pmd, __pmd(0));
1122 }
1123 /* In case we did something silly, we should crash in this function
1124 * instead of somewhere later, where it would be confusing. */
1125 xen_mc_flush();
1126}
Juergen Gross054954e2014-11-28 11:53:58 +01001127
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001128/*
1129 * Make a page range writeable and free it.
1130 */
1131static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1132{
1133 void *vaddr = __va(paddr);
1134 void *vaddr_end = vaddr + size;
1135
1136 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1137 make_lowmem_page_readwrite(vaddr);
1138
1139 memblock_free(paddr, size);
1140}
1141
Juergen Gross70e61192015-07-17 06:51:35 +02001142static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001143{
1144 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1145
Juergen Gross70e61192015-07-17 06:51:35 +02001146 if (unpin)
1147 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001148 ClearPagePinned(virt_to_page(__va(pa)));
1149 xen_free_ro_pages(pa, PAGE_SIZE);
1150}
1151
1152/*
1153 * Since it is well isolated we can (and since it is perhaps large we should)
1154 * also free the page tables mapping the initial P->M table.
1155 */
1156static void __init xen_cleanmfnmap(unsigned long vaddr)
1157{
1158 unsigned long va = vaddr & PMD_MASK;
1159 unsigned long pa;
1160 pgd_t *pgd = pgd_offset_k(va);
1161 pud_t *pud_page = pud_offset(pgd, 0);
1162 pud_t *pud;
1163 pmd_t *pmd;
1164 pte_t *pte;
1165 unsigned int i;
Juergen Gross70e61192015-07-17 06:51:35 +02001166 bool unpin;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001167
Juergen Gross70e61192015-07-17 06:51:35 +02001168 unpin = (vaddr == 2 * PGDIR_SIZE);
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001169 set_pgd(pgd, __pgd(0));
1170 do {
1171 pud = pud_page + pud_index(va);
1172 if (pud_none(*pud)) {
1173 va += PUD_SIZE;
1174 } else if (pud_large(*pud)) {
1175 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1176 xen_free_ro_pages(pa, PUD_SIZE);
1177 va += PUD_SIZE;
1178 } else {
1179 pmd = pmd_offset(pud, va);
1180 if (pmd_large(*pmd)) {
1181 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1182 xen_free_ro_pages(pa, PMD_SIZE);
1183 } else if (!pmd_none(*pmd)) {
1184 pte = pte_offset_kernel(pmd, va);
Juergen Gross70e61192015-07-17 06:51:35 +02001185 set_pmd(pmd, __pmd(0));
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001186 for (i = 0; i < PTRS_PER_PTE; ++i) {
1187 if (pte_none(pte[i]))
1188 break;
1189 pa = pte_pfn(pte[i]) << PAGE_SHIFT;
1190 xen_free_ro_pages(pa, PAGE_SIZE);
1191 }
Juergen Gross70e61192015-07-17 06:51:35 +02001192 xen_cleanmfnmap_free_pgtbl(pte, unpin);
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001193 }
1194 va += PMD_SIZE;
1195 if (pmd_index(va))
1196 continue;
Juergen Gross70e61192015-07-17 06:51:35 +02001197 set_pud(pud, __pud(0));
1198 xen_cleanmfnmap_free_pgtbl(pmd, unpin);
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001199 }
1200
1201 } while (pud_index(va) || pmd_index(va));
Juergen Gross70e61192015-07-17 06:51:35 +02001202 xen_cleanmfnmap_free_pgtbl(pud_page, unpin);
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001203}
1204
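/*
 * Free the memory holding the original Xen-supplied p2m list (and, if it
 * lives outside the kernel mapping, the page tables mapping it) once the
 * p2m tree has been set up.
 */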
Juergen Gross054954e2014-11-28 11:53:58 +01001205static void __init xen_pagetable_p2m_free(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001206{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001207 unsigned long size;
1208 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001209
1210 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1211
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001212 /* No memory or already called. */
Juergen Gross054954e2014-11-28 11:53:58 +01001213 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001214 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001215
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001216 /* Use the __ka address and fill the list with INVALID_P2M_ENTRY. */
1217 memset((void *)xen_start_info->mfn_list, 0xff, size);
1218
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001219 addr = xen_start_info->mfn_list;
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001220 /*
1221 * We could be in __ka space.
1222 * We round up to the PMD, which means that if anybody at this stage is
1223 * using the __ka address of xen_start_info or
1224 * xen_start_info->shared_info they are going to crash. Fortunately
1225 * we have already revectored in xen_setup_kernel_pagetable and in
1226 * xen_setup_shared_info.
1227 */
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001228 size = roundup(size, PMD_SIZE);
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001229
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001230 if (addr >= __START_KERNEL_map) {
1231 xen_cleanhighmap(addr, addr + size);
1232 size = PAGE_ALIGN(xen_start_info->nr_pages *
1233 sizeof(unsigned long));
1234 memblock_free(__pa(addr), size);
1235 } else {
1236 xen_cleanmfnmap(addr);
1237 }
Juergen Gross70e61192015-07-17 06:51:35 +02001238}
1239
1240static void __init xen_pagetable_cleanhighmap(void)
1241{
1242 unsigned long size;
1243 unsigned long addr;
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001244
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001245 /* At this stage, cleanup_highmap has already cleaned __ka space
1246 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1247 * the ramdisk). We continue on, erasing PMD entries that point to page
1248 * tables - do note that they are accessible at this stage via __va.
1249 * For good measure we also round up to the PMD - which means that if
1250 * anybody is using a __ka address for the initial boot-stack - and tries
1251 * to use it - they are going to crash. The xen_start_info has been
1252 * taken care of already in xen_setup_kernel_pagetable. */
1253 addr = xen_start_info->pt_base;
1254 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1255
1256 xen_cleanhighmap(addr, addr + size);
1257 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1258#ifdef DEBUG
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08001259 /* This is superfluous, but you know what, let's do it anyway.
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001260 * The MODULES_VADDR -> MODULES_END range should be clear of
1261 * anything at this stage. */
1262 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1263#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001264}
1265#endif
1266
Juergen Gross054954e2014-11-28 11:53:58 +01001267static void __init xen_pagetable_p2m_setup(void)
1268{
1269 if (xen_feature(XENFEAT_auto_translated_physmap))
1270 return;
1271
1272 xen_vmalloc_p2m_tree();
1273
1274#ifdef CONFIG_X86_64
1275 xen_pagetable_p2m_free();
Juergen Gross70e61192015-07-17 06:51:35 +02001276
1277 xen_pagetable_cleanhighmap();
Juergen Gross054954e2014-11-28 11:53:58 +01001278#endif
1279 /* And revector! Bye bye old array */
1280 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1281}
1282
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001283static void __init xen_pagetable_init(void)
1284{
1285 paging_init();
Juergen Grosscdfa0ba2014-12-10 16:56:03 +01001286 xen_post_allocator_init();
Juergen Gross054954e2014-11-28 11:53:58 +01001287
1288 xen_pagetable_p2m_setup();
1289
Juergen Gross2c185682014-10-14 13:33:46 +02001290 /* Allocate and initialize top and mid mfn levels for p2m structure */
1291 xen_build_mfn_list_list();
1292
Juergen Gross1f3ac862014-11-28 11:53:53 +01001293 /* Remap memory freed due to conflicts with E820 map */
1294 if (!xen_feature(XENFEAT_auto_translated_physmap))
1295 xen_remap_memory();
1296
Juergen Gross2c185682014-10-14 13:33:46 +02001297 xen_setup_shared_info();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001298}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001299static void xen_write_cr2(unsigned long cr2)
1300{
Alex Shi2113f462012-01-13 23:53:35 +08001301 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001302}
1303
1304static unsigned long xen_read_cr2(void)
1305{
Alex Shi2113f462012-01-13 23:53:35 +08001306 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001307}
1308
1309unsigned long xen_read_cr2_direct(void)
1310{
Alex Shi2113f462012-01-13 23:53:35 +08001311 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001312}
1313
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001314void xen_flush_tlb_all(void)
1315{
1316 struct mmuext_op *op;
1317 struct multicall_space mcs;
1318
1319 trace_xen_mmu_flush_tlb_all(0);
1320
1321 preempt_disable();
1322
1323 mcs = xen_mc_entry(sizeof(*op));
1324
1325 op = mcs.args;
1326 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1327 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1328
1329 xen_mc_issue(PARAVIRT_LAZY_MMU);
1330
1331 preempt_enable();
1332}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001333static void xen_flush_tlb(void)
1334{
1335 struct mmuext_op *op;
1336 struct multicall_space mcs;
1337
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001338 trace_xen_mmu_flush_tlb(0);
1339
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001340 preempt_disable();
1341
1342 mcs = xen_mc_entry(sizeof(*op));
1343
1344 op = mcs.args;
1345 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1346 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1347
1348 xen_mc_issue(PARAVIRT_LAZY_MMU);
1349
1350 preempt_enable();
1351}
1352
1353static void xen_flush_tlb_single(unsigned long addr)
1354{
1355 struct mmuext_op *op;
1356 struct multicall_space mcs;
1357
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001358 trace_xen_mmu_flush_tlb_single(addr);
1359
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001360 preempt_disable();
1361
1362 mcs = xen_mc_entry(sizeof(*op));
1363 op = mcs.args;
1364 op->cmd = MMUEXT_INVLPG_LOCAL;
1365 op->arg1.linear_addr = addr & PAGE_MASK;
1366 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1367
1368 xen_mc_issue(PARAVIRT_LAZY_MMU);
1369
1370 preempt_enable();
1371}
1372
1373static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001374 struct mm_struct *mm, unsigned long start,
1375 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001376{
1377 struct {
1378 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001379#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001380 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001381#else
1382 DECLARE_BITMAP(mask, NR_CPUS);
1383#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001384 } *args;
1385 struct multicall_space mcs;
1386
Alex Shie7b52ff2012-06-28 09:02:17 +08001387 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001388
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001389 if (cpumask_empty(cpus))
1390 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001391
1392 mcs = xen_mc_entry(sizeof(*args));
1393 args = mcs.args;
1394 args->op.arg2.vcpumask = to_cpumask(args->mask);
1395
1396 /* Remove us, and any offline CPUS. */
1397 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1398 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001399
Alex Shie7b52ff2012-06-28 09:02:17 +08001400 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001401 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001402 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001403 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001404 }
1405
1406 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1407
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001408 xen_mc_issue(PARAVIRT_LAZY_MMU);
1409}
1410
1411static unsigned long xen_read_cr3(void)
1412{
Alex Shi2113f462012-01-13 23:53:35 +08001413 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001414}
1415
1416static void set_current_cr3(void *v)
1417{
Alex Shi2113f462012-01-13 23:53:35 +08001418 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001419}
1420
1421static void __xen_write_cr3(bool kernel, unsigned long cr3)
1422{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001423 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001424 unsigned long mfn;
1425
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001426 trace_xen_mmu_write_cr3(kernel, cr3);
1427
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001428 if (cr3)
1429 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1430 else
1431 mfn = 0;
1432
1433 WARN_ON(mfn == 0 && kernel);
1434
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001435 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1436 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001437
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001438 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001439
1440 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001441 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001442
1443 /* Update xen_current_cr3 once the batch has actually
1444 been submitted. */
1445 xen_mc_callback(set_current_cr3, (void *)cr3);
1446 }
1447}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001448static void xen_write_cr3(unsigned long cr3)
1449{
1450 BUG_ON(preemptible());
1451
1452 xen_mc_batch(); /* disables interrupts */
1453
1454 /* Update while interrupts are disabled, so it's atomic with
1455 respect to IPIs */
Alex Shi2113f462012-01-13 23:53:35 +08001456 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001457
1458 __xen_write_cr3(true, cr3);
1459
1460#ifdef CONFIG_X86_64
1461 {
1462 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1463 if (user_pgd)
1464 __xen_write_cr3(false, __pa(user_pgd));
1465 else
1466 __xen_write_cr3(false, 0);
1467 }
1468#endif
1469
1470 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1471}
1472
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001473#ifdef CONFIG_X86_64
1474/*
1475 * At the start of the day - when Xen launches a guest, it has already
1476 * built pagetables for the guest. We diligently look over them
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08001477 * in xen_setup_kernel_pagetable and graft them as appropriate into the
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001478 * init_level4_pgt and its friends. Then when we are happy we load
1479 * the new init_level4_pgt - and continue on.
1480 *
1481 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1482 * up the rest of the pagetables. When it has completed it loads the cr3.
1483 * N.B. that baremetal would start at 'start_kernel' (and the early
1484 * #PF handler would create bootstrap pagetables) - so we are running
1485 * with the same assumptions as what to do when write_cr3 is executed
1486 * at this point.
1487 *
1488 * Since there are no user-page tables at all, we have two variants
1489 * of xen_write_cr3 - the early bootup (this one), and the late one
1490 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1491 * the Linux kernel and user-space are both in ring 3 while the
1492 * hypervisor is in ring 0.
1493 */
1494static void __init xen_write_cr3_init(unsigned long cr3)
1495{
1496 BUG_ON(preemptible());
1497
1498 xen_mc_batch(); /* disables interrupts */
1499
1500 /* Update while interrupts are disabled, so it's atomic with
1501 respect to IPIs */
1502 this_cpu_write(xen_cr3, cr3);
1503
1504 __xen_write_cr3(true, cr3);
1505
1506 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001507}
1508#endif
1509
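/* On 64-bit, also allocate the user pgd and stash it in the kernel
 * pgd's struct page ->private field (see xen_get_user_pgd()). */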
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001510static int xen_pgd_alloc(struct mm_struct *mm)
1511{
1512 pgd_t *pgd = mm->pgd;
1513 int ret = 0;
1514
1515 BUG_ON(PagePinned(virt_to_page(pgd)));
1516
1517#ifdef CONFIG_X86_64
1518 {
1519 struct page *page = virt_to_page(pgd);
1520 pgd_t *user_pgd;
1521
1522 BUG_ON(page->private != 0);
1523
1524 ret = -ENOMEM;
1525
1526 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1527 page->private = (unsigned long)user_pgd;
1528
1529 if (user_pgd != NULL) {
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001530#ifdef CONFIG_X86_VSYSCALL_EMULATION
Andy Lutomirskif40c3302014-05-05 12:19:36 -07001531 user_pgd[pgd_index(VSYSCALL_ADDR)] =
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001532 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07001533#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001534 ret = 0;
1535 }
1536
1537 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1538 }
1539#endif
1540
1541 return ret;
1542}
1543
1544static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1545{
1546#ifdef CONFIG_X86_64
1547 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1548
1549 if (user_pgd)
1550 free_page((unsigned long)user_pgd);
1551#endif
1552}
1553
Stefano Stabelliniee176452011-04-19 14:47:31 +01001554#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001555static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001556{
1557 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1558 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1559 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1560 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001561
1562 return pte;
1563}
1564#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001565static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001566{
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001567 unsigned long pfn;
1568
1569 if (xen_feature(XENFEAT_writable_page_tables) ||
1570 xen_feature(XENFEAT_auto_translated_physmap) ||
1571 xen_start_info->mfn_list >= __START_KERNEL_map)
1572 return pte;
1573
1574 /*
1575 * Pages belonging to the initial p2m list mapped outside the default
1576 * address range must be mapped read-only. This region contains the
1577 * page tables for mapping the p2m list, too, and page tables MUST be
1578 * mapped read-only.
1579 */
1580 pfn = pte_pfn(pte);
1581 if (pfn >= xen_start_info->first_p2m_pfn &&
1582 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1583 pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
1584
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001585 return pte;
1586}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001587#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001588
David Vrabeld095d432012-07-09 11:39:05 +01001589/*
1590 * Init-time set_pte while constructing initial pagetables, which
1591 * doesn't allow RO page table pages to be remapped RW.
1592 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001593 * If there is no MFN for this PFN then this page is initially
1594 * ballooned out so clear the PTE (as in decrease_reservation() in
1595 * drivers/xen/balloon.c).
1596 *
David Vrabeld095d432012-07-09 11:39:05 +01001597 * Many of these PTE updates are done on unpinned and writable pages
1598 * and doing a hypercall for these is unnecessary and expensive. At
1599 * this point it is not possible to tell if a page is pinned or not,
1600 * so always write the PTE directly and rely on Xen trapping and
1601 * emulating any updates as necessary.
1602 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001603static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001604{
David Vrabel66a27dd2012-07-09 11:39:06 +01001605 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1606 pte = mask_rw_pte(ptep, pte);
1607 else
1608 pte = __pte_ma(0);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001609
David Vrabeld095d432012-07-09 11:39:05 +01001610 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001611}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001612
1613/* Early in boot, while setting up the initial pagetable, assume
1614 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001615static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001616{
1617#ifdef CONFIG_FLATMEM
1618 BUG_ON(mem_map); /* should only be used early */
1619#endif
1620 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001621 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1622}
1623
1624/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001625static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001626{
1627#ifdef CONFIG_FLATMEM
1628 BUG_ON(mem_map); /* should only be used early */
1629#endif
1630 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001631}
1632
1633/* Early release_pte assumes that all pts are pinned, since there's
1634 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001635static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001636{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001637 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001638 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1639}
1640
Daniel Kiper3f5089532011-05-12 17:19:53 -04001641static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001642{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001643 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001644}
1645
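/*
 * Batched (multicall) helpers: queue a pin/unpin or a mapping protection
 * change without flushing; callers bracket them with a batch, e.g.:
 *
 *	xen_mc_batch();
 *	__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 *	__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */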
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001646static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1647{
1648 struct multicall_space mcs;
1649 struct mmuext_op *op;
1650
1651 mcs = __xen_mc_entry(sizeof(*op));
1652 op = mcs.args;
1653 op->cmd = cmd;
1654 op->arg1.mfn = pfn_to_mfn(pfn);
1655
1656 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1657}
1658
1659static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1660{
1661 struct multicall_space mcs;
1662 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1663
1664 mcs = __xen_mc_entry(0);
1665 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1666 pfn_pte(pfn, prot), 0);
1667}
1668
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001669/* This needs to make sure the new pte page is pinned iff it's being
1670 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001671static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1672 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001673{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001674 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001675
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001676 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001677
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001678 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001679 struct page *page = pfn_to_page(pfn);
1680
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001681 SetPagePinned(page);
1682
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001683 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001684 xen_mc_batch();
1685
1686 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1687
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001688 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001689 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1690
1691 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001692 } else {
1693 /* make sure there are no stray mappings of
1694 this page */
1695 kmap_flush_unused();
1696 }
1697 }
1698}
1699
1700static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1701{
1702 xen_alloc_ptpage(mm, pfn, PT_PTE);
1703}
1704
1705static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1706{
1707 xen_alloc_ptpage(mm, pfn, PT_PMD);
1708}
1709
1710/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001711static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001712{
1713 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001714 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001715
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001716 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1717
1718 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001719 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001720 xen_mc_batch();
1721
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001722 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001723 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1724
1725 __set_pfn_prot(pfn, PAGE_KERNEL);
1726
1727 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001728 }
1729 ClearPagePinned(page);
1730 }
1731}
1732
1733static void xen_release_pte(unsigned long pfn)
1734{
1735 xen_release_ptpage(pfn, PT_PTE);
1736}
1737
1738static void xen_release_pmd(unsigned long pfn)
1739{
1740 xen_release_ptpage(pfn, PT_PMD);
1741}
1742
Kirill A. Shutemov98233362015-04-14 15:46:14 -07001743#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001744static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1745{
1746 xen_alloc_ptpage(mm, pfn, PT_PUD);
1747}
1748
1749static void xen_release_pud(unsigned long pfn)
1750{
1751 xen_release_ptpage(pfn, PT_PUD);
1752}
1753#endif
1754
1755void __init xen_reserve_top(void)
1756{
1757#ifdef CONFIG_X86_32
1758 unsigned long top = HYPERVISOR_VIRT_START;
1759 struct xen_platform_parameters pp;
1760
1761 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1762 top = pp.virt_start;
1763
1764 reserve_top_address(-top);
1765#endif /* CONFIG_X86_32 */
1766}
1767
1768/*
1769 * Like __va(), but returns the address in the kernel mapping (which is
1770 * all we have until the physical memory mapping has been set up).
1771 */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001772static void * __init __ka(phys_addr_t paddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001773{
1774#ifdef CONFIG_X86_64
1775 return (void *)(paddr + __START_KERNEL_map);
1776#else
1777 return __va(paddr);
1778#endif
1779}
1780
1781/* Convert a machine address to physical address */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001782static unsigned long __init m2p(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001783{
1784 phys_addr_t paddr;
1785
1786 maddr &= PTE_PFN_MASK;
1787 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1788
1789 return paddr;
1790}
1791
1792/* Convert a machine address to kernel virtual */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001793static void * __init m2v(phys_addr_t maddr)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001794{
1795 return __ka(m2p(maddr));
1796}
1797
Juan Quintela4ec53872010-09-02 15:45:43 +01001798/* Set the page permissions on identity-mapped pages */
Juergen Grossbf9d8342015-01-28 07:44:24 +01001799static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1800 unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001801{
1802 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1803 pte_t pte = pfn_pte(pfn, prot);
1804
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001805 /* For PVH no need to set R/O or R/W to pin them or unpin them. */
1806 if (xen_feature(XENFEAT_auto_translated_physmap))
1807 return;
1808
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001809 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001810 BUG();
1811}
Juergen Grossbf9d8342015-01-28 07:44:24 +01001812static void __init set_page_prot(void *addr, pgprot_t prot)
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001813{
1814 return set_page_prot_flags(addr, prot, UVMF_NONE);
1815}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001816#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001817static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001818{
1819 unsigned pmdidx, pteidx;
1820 unsigned ident_pte;
1821 unsigned long pfn;
1822
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001823 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1824 PAGE_SIZE);
1825
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001826 ident_pte = 0;
1827 pfn = 0;
1828 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1829 pte_t *pte_page;
1830
1831 /* Reuse or allocate a page of ptes */
1832 if (pmd_present(pmd[pmdidx]))
1833 pte_page = m2v(pmd[pmdidx].pmd);
1834 else {
1835 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001836 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001837 break;
1838
1839 pte_page = &level1_ident_pgt[ident_pte];
1840 ident_pte += PTRS_PER_PTE;
1841
1842 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1843 }
1844
1845 /* Install mappings */
1846 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1847 pte_t pte;
1848
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001849 if (pfn > max_pfn_mapped)
1850 max_pfn_mapped = pfn;
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001851
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001852 if (!pte_none(pte_page[pteidx]))
1853 continue;
1854
1855 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1856 pte_page[pteidx] = pte;
1857 }
1858 }
1859
1860 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1861 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1862
1863 set_page_prot(pmd, PAGE_KERNEL_RO);
1864}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001865#endif
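/* Ask the hypervisor where the machine-to-physical table is mapped,
 * falling back to the default MACH2PHYS_NR_ENTRIES. */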
Ian Campbell7e775062010-09-30 12:37:26 +01001866void __init xen_setup_machphys_mapping(void)
1867{
1868 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001869
1870 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1871 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001872 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001873 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001874 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001875 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001876#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001877 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1878 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001879#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001880}
1881
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001882#ifdef CONFIG_X86_64
Juergen Grossbf9d8342015-01-28 07:44:24 +01001883static void __init convert_pfn_mfn(void *v)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001884{
1885 pte_t *pte = v;
1886 int i;
1887
1888 /* All levels are converted the same way, so just treat them
1889 as ptes. */
1890 for (i = 0; i < PTRS_PER_PTE; i++)
1891 pte[i] = xen_make_pte(pte[i].pte);
1892}
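/*
 * If @addr is the first or last frame of the Xen-provided pagetable
 * range, make it writable again, clear it and shrink the range so the
 * page is not kept reserved.
 */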
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001893static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1894 unsigned long addr)
1895{
1896 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001897 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001898 clear_page((void *)addr);
1899 (*pt_base)++;
1900 }
1901 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001902 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001903 clear_page((void *)addr);
1904 (*pt_end)--;
1905 }
1906}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001907/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001908 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001909 *
1910 * We can construct this by grafting the Xen provided pagetable into
1911 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
Stefan Bader0b5a5062014-09-02 11:16:01 +01001912 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1913 * kernel has a physical mapping to start with - but that's enough to
1914 * get __va working. We need to fill in the rest of the physical
1915 * mapping once some sort of allocator has been set up. NOTE: for
1916 * PVH, the page tables are native.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001917 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001918void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001919{
1920 pud_t *l3;
1921 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001922 unsigned long addr[3];
1923 unsigned long pt_base, pt_end;
1924 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001925
Stefano Stabellini14988a42011-02-18 11:32:40 +00001926 /* max_pfn_mapped is the last pfn mapped in the initial memory
1927 * mappings. Considering that on Xen after the kernel mappings we
1928 * have the mappings of some pages that don't exist in pfn space, we
1929 * set max_pfn_mapped to the last real pfn mapped. */
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001930 if (xen_start_info->mfn_list < __START_KERNEL_map)
1931 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1932 else
1933 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
Stefano Stabellini14988a42011-02-18 11:32:40 +00001934
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001935 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1936 pt_end = pt_base + xen_start_info->nr_pt_frames;
1937
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001938 /* Zap identity mapping */
1939 init_level4_pgt[0] = __pgd(0);
1940
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001941 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1942 /* Pre-constructed entries are in pfn, so convert to mfn */
1943 /* L4[272] -> level3_ident_pgt
1944 * L4[511] -> level3_kernel_pgt */
1945 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001946
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001947 /* L3_i[0] -> level2_ident_pgt */
1948 convert_pfn_mfn(level3_ident_pgt);
1949 /* L3_k[510] -> level2_kernel_pgt
Stefan Bader0b5a5062014-09-02 11:16:01 +01001950 * L3_k[511] -> level2_fixmap_pgt */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001951 convert_pfn_mfn(level3_kernel_pgt);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001952
1953 /* L3_k[511][506] -> level1_fixmap_pgt */
1954 convert_pfn_mfn(level2_fixmap_pgt);
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001955 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001956 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001957 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1958 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1959
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001960 addr[0] = (unsigned long)pgd;
1961 addr[1] = (unsigned long)l3;
1962 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001963 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
Stefan Bader0b5a5062014-09-02 11:16:01 +01001964 * Both L4[272][0] and L4[511][510] have entries that point to the same
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001965 * L2 (PMD) tables. Meaning that if you modify it in __va space
1966 * it will also be modified in the __ka space! (But if you just
1967 * modify the PMD table to point to other PTE's or none, then you
1968 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001969 copy_page(level2_ident_pgt, l2);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001970 /* Graft it onto L4[511][510] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001971 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001972
Juergen Gross8f5b0c62015-07-17 06:51:25 +02001973 /* Copy the initial P->M table mappings if necessary. */
1974 i = pgd_index(xen_start_info->mfn_list);
1975 if (i && i < pgd_index(__START_KERNEL_map))
1976 init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1977
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001978 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1979 /* Make pagetable pieces RO */
1980 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1981 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1982 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1983 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1984 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1985 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1986 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001987 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001988
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001989 /* Pin down new L4 */
1990 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1991 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001992
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001993 /* Unpin Xen-provided one */
1994 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001995
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001996 /*
1997 * At this stage there can be no user pgd, and no page
1998 * structure to attach it to, so make sure we just set kernel
1999 * pgd.
2000 */
2001 xen_mc_batch();
2002 __xen_write_cr3(true, __pa(init_level4_pgt));
2003 xen_mc_issue(PARAVIRT_LAZY_CPU);
2004 } else
2005 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002006
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04002007 /* We can't easily rip out L3 and L2, as the Xen pagetables are
2008 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
2009 * the initial domain. For guests using the toolstack, they are in:
2010 * [L4], [L3], [L2], [L1], [L1], in that order. So for dom0 we can only
2011 * rip out the [L4] (pgd), but for guests we shave off three pages.
2012 */
2013 for (i = 0; i < ARRAY_SIZE(addr); i++)
2014 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002015
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04002016 /* Our now smaller (by up to three pages) Xen pagetable that we are using */
Juergen Gross04414ba2015-07-17 06:51:31 +02002017 xen_pt_base = PFN_PHYS(pt_base);
2018 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
2019 memblock_reserve(xen_pt_base, xen_pt_size);
Juergen Gross70e61192015-07-17 06:51:35 +02002020
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04002021 /* Revector the xen_start_info */
2022 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002023}
Juergen Gross70e61192015-07-17 06:51:35 +02002024
2025/*
2026 * Read a value from a physical address.
2027 */
2028static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
2029{
2030 unsigned long *vaddr;
2031 unsigned long val;
2032
2033 vaddr = early_memremap_ro(addr, sizeof(val));
2034 val = *vaddr;
2035 early_memunmap(vaddr, sizeof(val));
2036 return val;
2037}
2038
2039/*
2040 * Translate a virtual address to a physical one without relying on mapped
2041 * page tables.
2042 */
2043static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2044{
2045 phys_addr_t pa;
2046 pgd_t pgd;
2047 pud_t pud;
2048 pmd_t pmd;
2049 pte_t pte;
2050
2051 pa = read_cr3();
2052 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2053 sizeof(pgd)));
2054 if (!pgd_present(pgd))
2055 return 0;
2056
2057 pa = pgd_val(pgd) & PTE_PFN_MASK;
2058 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2059 sizeof(pud)));
2060 if (!pud_present(pud))
2061 return 0;
2062 pa = pud_pfn(pud) << PAGE_SHIFT;
2063 if (pud_large(pud))
2064 return pa + (vaddr & ~PUD_MASK);
2065
2066 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2067 sizeof(pmd)));
2068 if (!pmd_present(pmd))
2069 return 0;
2070 pa = pmd_pfn(pmd) << PAGE_SHIFT;
2071 if (pmd_large(pmd))
2072 return pa + (vaddr & ~PMD_MASK);
2073
2074 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2075 sizeof(pte)));
2076 if (!pte_present(pte))
2077 return 0;
2078 pa = pte_pfn(pte) << PAGE_SHIFT;
2079
2080 return pa | (vaddr & ~PAGE_MASK);
2081}
2082
2083/*
2084 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
2085 * this area.
2086 */
2087void __init xen_relocate_p2m(void)
2088{
2089 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
2090 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2091 int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
2092 pte_t *pt;
2093 pmd_t *pmd;
2094 pud_t *pud;
2095 pgd_t *pgd;
2096 unsigned long *new_p2m;
2097
2098 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2099 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2100 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2101 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2102 n_pud = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
2103 n_frames = n_pte + n_pt + n_pmd + n_pud;
2104
2105 new_area = xen_find_free_area(PFN_PHYS(n_frames));
2106 if (!new_area) {
2107 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2108 BUG();
2109 }
2110
2111 /*
2112 * Setup the page tables for addressing the new p2m list.
2113 * We have asked the hypervisor to map the p2m list at the user address
2114 * PUD_SIZE. It may have done so, or it may have used a kernel space
2115 * address depending on the Xen version.
2116 * To avoid any possible virtual address collision, just use
2117 * 2 * PUD_SIZE for the new area.
2118 */
2119 pud_phys = new_area;
2120 pmd_phys = pud_phys + PFN_PHYS(n_pud);
2121 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2122 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2123
2124 pgd = __va(read_cr3());
2125 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2126 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2127 pud = early_memremap(pud_phys, PAGE_SIZE);
2128 clear_page(pud);
2129 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2130 idx_pmd++) {
2131 pmd = early_memremap(pmd_phys, PAGE_SIZE);
2132 clear_page(pmd);
2133 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2134 idx_pt++) {
2135 pt = early_memremap(pt_phys, PAGE_SIZE);
2136 clear_page(pt);
2137 for (idx_pte = 0;
2138 idx_pte < min(n_pte, PTRS_PER_PTE);
2139 idx_pte++) {
2140 set_pte(pt + idx_pte,
2141 pfn_pte(p2m_pfn, PAGE_KERNEL));
2142 p2m_pfn++;
2143 }
2144 n_pte -= PTRS_PER_PTE;
2145 early_memunmap(pt, PAGE_SIZE);
2146 make_lowmem_page_readonly(__va(pt_phys));
2147 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2148 PFN_DOWN(pt_phys));
2149 set_pmd(pmd + idx_pt,
2150 __pmd(_PAGE_TABLE | pt_phys));
2151 pt_phys += PAGE_SIZE;
2152 }
2153 n_pt -= PTRS_PER_PMD;
2154 early_memunmap(pmd, PAGE_SIZE);
2155 make_lowmem_page_readonly(__va(pmd_phys));
2156 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2157 PFN_DOWN(pmd_phys));
2158 set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2159 pmd_phys += PAGE_SIZE;
2160 }
2161 n_pmd -= PTRS_PER_PUD;
2162 early_memunmap(pud, PAGE_SIZE);
2163 make_lowmem_page_readonly(__va(pud_phys));
2164 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2165 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2166 pud_phys += PAGE_SIZE;
2167 }
2168
2169 /* Now copy the old p2m info to the new area. */
2170 memcpy(new_p2m, xen_p2m_addr, size);
2171 xen_p2m_addr = new_p2m;
2172
2173 /* Release the old p2m list and set new list info. */
2174 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2175 BUG_ON(!p2m_pfn);
2176 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2177
2178 if (xen_start_info->mfn_list < __START_KERNEL_map) {
2179 pfn = xen_start_info->first_p2m_pfn;
2180 pfn_end = xen_start_info->first_p2m_pfn +
2181 xen_start_info->nr_p2m_frames;
2182 set_pgd(pgd + 1, __pgd(0));
2183 } else {
2184 pfn = p2m_pfn;
2185 pfn_end = p2m_pfn_end;
2186 }
2187
2188 memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2189 while (pfn < pfn_end) {
2190 if (pfn == p2m_pfn) {
2191 pfn = p2m_pfn_end;
2192 continue;
2193 }
2194 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2195 pfn++;
2196 }
2197
2198 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2199 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2200 xen_start_info->nr_p2m_frames = n_frames;
2201}
2202
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002203#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002204static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2205static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2206
Daniel Kiper3f5089532011-05-12 17:19:53 -04002207static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002208{
2209 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2210
2211 BUG_ON(read_cr3() != __pa(initial_page_table));
2212 BUG_ON(cr3 != __pa(swapper_pg_dir));
2213
2214 /*
2215 * We are switching to swapper_pg_dir for the first time (from
2216 * initial_page_table) and therefore need to mark that page
2217 * read-only and then pin it.
2218 *
2219 * Xen disallows sharing of kernel PMDs for PAE
2220 * guests. Therefore we must copy the kernel PMD from
2221 * initial_page_table into a new kernel PMD to be used in
2222 * swapper_pg_dir.
2223 */
2224 swapper_kernel_pmd =
2225 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002226 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002227 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2228 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2229 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2230
2231 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2232 xen_write_cr3(cr3);
2233 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2234
2235 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2236 PFN_DOWN(__pa(initial_page_table)));
2237 set_page_prot(initial_page_table, PAGE_KERNEL);
2238 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2239
2240 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2241}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002242
Juergen Gross70e61192015-07-17 06:51:35 +02002243/*
2244 * For 32 bit domains xen_start_info->pt_base is the pgd address, which might
2245 * not be the first page table in the page table pool.
2246 * Iterate through the initial page tables to find the real page table base.
2247 */
2248static phys_addr_t xen_find_pt_base(pmd_t *pmd)
2249{
2250 phys_addr_t pt_base, paddr;
2251 unsigned pmdidx;
2252
2253 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2254
2255 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2256 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2257 paddr = m2p(pmd[pmdidx].pmd);
2258 pt_base = min(pt_base, paddr);
2259 }
2260
2261 return pt_base;
2262}
2263
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04002264void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002265{
2266 pmd_t *kernel_pmd;
2267
Juergen Gross70e61192015-07-17 06:51:35 +02002268 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2269
2270 xen_pt_base = xen_find_pt_base(kernel_pmd);
2271 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2272
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002273 initial_kernel_pmd =
2274 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07002275
Juergen Gross70e61192015-07-17 06:51:35 +02002276 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002277
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002278 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002279
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002280 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002281
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04002282 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002283 initial_page_table[KERNEL_PGD_BOUNDARY] =
2284 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002285
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002286 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2287 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002288 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2289
2290 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2291
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002292 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2293 PFN_DOWN(__pa(initial_page_table)));
2294 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002295
Juergen Gross04414ba2015-07-17 06:51:31 +02002296 memblock_reserve(xen_pt_base, xen_pt_size);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002297}
2298#endif /* CONFIG_X86_64 */
2299
Juergen Gross6c2681c2015-07-17 06:51:34 +02002300void __init xen_reserve_special_pages(void)
2301{
2302 phys_addr_t paddr;
2303
2304 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2305 if (xen_start_info->store_mfn) {
2306 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2307 memblock_reserve(paddr, PAGE_SIZE);
2308 }
2309 if (!xen_initial_domain()) {
2310 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2311 memblock_reserve(paddr, PAGE_SIZE);
2312 }
2313}
2314
Juergen Gross04414ba2015-07-17 06:51:31 +02002315void __init xen_pt_check_e820(void)
2316{
2317 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2318 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2319 BUG();
2320 }
2321}
2322
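/* Dummy page mapped in place of the local APIC and IO APIC fixmaps;
 * it is filled with 0xff in xen_init_mmu_ops(). */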
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002323static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2324
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002325static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002326{
2327 pte_t pte;
2328
2329 phys >>= PAGE_SHIFT;
2330
2331 switch (idx) {
2332 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002333 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002334#ifdef CONFIG_X86_32
2335 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002336# ifdef CONFIG_HIGHMEM
2337 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2338# endif
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002339#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002340 case VSYSCALL_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002341#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002342 case FIX_TEXT_POKE0:
2343 case FIX_TEXT_POKE1:
2344 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002345 pte = pfn_pte(phys, prot);
2346 break;
2347
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002348#ifdef CONFIG_X86_LOCAL_APIC
2349 case FIX_APIC_BASE: /* maps dummy local APIC */
2350 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2351 break;
2352#endif
2353
2354#ifdef CONFIG_X86_IO_APIC
2355 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2356 /*
2357 * We just don't map the IO APIC - all access is via
2358 * hypercalls. Keep the address in the pte for reference.
2359 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002360 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002361 break;
2362#endif
2363
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002364 case FIX_PARAVIRT_BOOTMAP:
2365 /* This is an MFN, but it isn't an IO mapping from the
2366 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002367 pte = mfn_pte(phys, prot);
2368 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002369
2370 default:
2371 /* By default, set_fixmap is used for hardware mappings */
David Vrabel7f2f8822014-01-08 14:01:01 +00002372 pte = mfn_pte(phys, prot);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002373 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002374 }
2375
2376 __native_set_fixmap(idx, pte);
2377
Andy Lutomirski1ad83c82014-10-29 14:33:47 -07002378#ifdef CONFIG_X86_VSYSCALL_EMULATION
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002379 /* Replicate changes to map the vsyscall page into the user
2380 pagetable vsyscall mapping. */
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002381 if (idx == VSYSCALL_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002382 unsigned long vaddr = __fix_to_virt(idx);
2383 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2384 }
2385#endif
2386}
2387
Daniel Kiper3f5089532011-05-12 17:19:53 -04002388static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002389{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002390 if (xen_feature(XENFEAT_auto_translated_physmap))
2391 return;
2392
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002393 pv_mmu_ops.set_pte = xen_set_pte;
2394 pv_mmu_ops.set_pmd = xen_set_pmd;
2395 pv_mmu_ops.set_pud = xen_set_pud;
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002396#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002397 pv_mmu_ops.set_pgd = xen_set_pgd;
2398#endif
2399
2400 /* This will work as long as patching hasn't happened yet
2401 (which it hasn't) */
2402 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2403 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2404 pv_mmu_ops.release_pte = xen_release_pte;
2405 pv_mmu_ops.release_pmd = xen_release_pmd;
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002406#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002407 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2408 pv_mmu_ops.release_pud = xen_release_pud;
2409#endif
2410
2411#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002412 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002413 SetPagePinned(virt_to_page(level3_user_vsyscall));
2414#endif
2415 xen_mark_init_mm_pinned();
2416}
2417
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002418static void xen_leave_lazy_mmu(void)
2419{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002420 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002421 xen_mc_flush();
2422 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002423 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002424}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002425
Daniel Kiper3f5089532011-05-12 17:19:53 -04002426static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002427 .read_cr2 = xen_read_cr2,
2428 .write_cr2 = xen_write_cr2,
2429
2430 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002431 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002432
2433 .flush_tlb_user = xen_flush_tlb,
2434 .flush_tlb_kernel = xen_flush_tlb,
2435 .flush_tlb_single = xen_flush_tlb_single,
2436 .flush_tlb_others = xen_flush_tlb_others,
2437
2438 .pte_update = paravirt_nop,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002439
2440 .pgd_alloc = xen_pgd_alloc,
2441 .pgd_free = xen_pgd_free,
2442
2443 .alloc_pte = xen_alloc_pte_init,
2444 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002445 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002446 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002447
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002448 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002449 .set_pte_at = xen_set_pte_at,
2450 .set_pmd = xen_set_pmd_hyper,
2451
2452 .ptep_modify_prot_start = __ptep_modify_prot_start,
2453 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2454
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002455 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2456 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002457
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002458 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2459 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002460
2461#ifdef CONFIG_X86_PAE
2462 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002463 .pte_clear = xen_pte_clear,
2464 .pmd_clear = xen_pmd_clear,
2465#endif /* CONFIG_X86_PAE */
2466 .set_pud = xen_set_pud_hyper,
2467
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002468 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2469 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002470
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002471#if CONFIG_PGTABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002472 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2473 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002474 .set_pgd = xen_set_pgd_hyper,
2475
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002476 .alloc_pud = xen_alloc_pmd_init,
2477 .release_pud = xen_release_pmd_init,
Kirill A. Shutemov98233362015-04-14 15:46:14 -07002478#endif /* CONFIG_PGTABLE_LEVELS == 4 */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002479
2480 .activate_mm = xen_activate_mm,
2481 .dup_mmap = xen_dup_mmap,
2482 .exit_mmap = xen_exit_mmap,
2483
2484 .lazy_mode = {
2485 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002486 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002487 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002488 },
2489
2490 .set_fixmap = xen_set_fixmap,
2491};
2492
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002493void __init xen_init_mmu_ops(void)
2494{
Attilio Rao7737b212012-08-21 21:22:38 +01002495 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002496
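	/*
	 * Auto-translated guests have their address translation handled by
	 * the hypervisor (hardware-assisted paging), so xen_mmu_ops is not
	 * installed for them.
	 */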
Boris Ostrovsky20f36e02015-12-12 19:25:55 -05002497 if (xen_feature(XENFEAT_auto_translated_physmap))
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002498 return;
Boris Ostrovsky20f36e02015-12-12 19:25:55 -05002499
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002500 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002501
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002502 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002503}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002504
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002505/* Protected by xen_reservation_lock. */
2506#define MAX_CONTIG_ORDER 9 /* 2MB */
2507static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
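/*
 * discontig_frames is scratch space for the exchanges below: with
 * MAX_CONTIG_ORDER 9 it holds up to 512 frame numbers, i.e. the frames
 * backing one 2MB region of 4kB pages.
 */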
2508
2509#define VOID_PTE (mfn_pte(0, __pgprot(0)))
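/*
 * Clear the PTEs covering 2^order pages starting at vaddr in a single
 * multicall batch and invalidate the corresponding P2M entries.  The old
 * MFNs are returned in in_frames and the PFNs in out_frames, when those
 * arrays are supplied.
 */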
2510static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2511 unsigned long *in_frames,
2512 unsigned long *out_frames)
2513{
2514 int i;
2515 struct multicall_space mcs;
2516
2517 xen_mc_batch();
2518 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2519 mcs = __xen_mc_entry(0);
2520
2521 if (in_frames)
2522 in_frames[i] = virt_to_mfn(vaddr);
2523
2524 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002525 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002526
2527 if (out_frames)
2528 out_frames[i] = virt_to_pfn(vaddr);
2529 }
2530 xen_mc_issue(0);
2531}
2532
2533/*
2534 * Remap a virtual address range to the given mfns (either one mfn per
2535 * page from an array, or contiguously from a single starting mfn),
2536 * updating both the PTEs and the pfn-to-mfn (p2m) entries.
2537 */
2538static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2539 unsigned long *mfns,
2540 unsigned long first_mfn)
2541{
2542 unsigned i, limit;
2543 unsigned long mfn;
2544
2545 xen_mc_batch();
2546
2547 limit = 1u << order;
2548 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2549 struct multicall_space mcs;
2550 unsigned flags;
2551
2552 mcs = __xen_mc_entry(0);
2553 if (mfns)
2554 mfn = mfns[i];
2555 else
2556 mfn = first_mfn + i;
2557
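		/*
		 * Request the TLB flush only on the last update of the
		 * batch, so the whole range is flushed exactly once.
		 */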
2558 if (i < (limit - 1))
2559 flags = 0;
2560 else {
2561 if (order == 0)
2562 flags = UVMF_INVLPG | UVMF_ALL;
2563 else
2564 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2565 }
2566
2567 MULTI_update_va_mapping(mcs.mc, vaddr,
2568 mfn_pte(mfn, PAGE_KERNEL), flags);
2569
2570 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2571 }
2572
2573 xen_mc_issue(0);
2574}
2575
2576/*
2577 * Perform the hypercall to exchange a region of our pfns to point to
2578 * memory with the required contiguous alignment. Takes the pfns as
2579 * input, and populates mfns as output.
2580 *
2581 * Returns a success code indicating whether the hypervisor was able to
2582 * satisfy the request or not.
2583 */
2584static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2585 unsigned long *pfns_in,
2586 unsigned long extents_out,
2587 unsigned int order_out,
2588 unsigned long *mfns_out,
2589 unsigned int address_bits)
2590{
2591 long rc;
2592 int success;
2593
2594 struct xen_memory_exchange exchange = {
2595 .in = {
2596 .nr_extents = extents_in,
2597 .extent_order = order_in,
2598 .extent_start = pfns_in,
2599 .domid = DOMID_SELF
2600 },
2601 .out = {
2602 .nr_extents = extents_out,
2603 .extent_order = order_out,
2604 .extent_start = mfns_out,
2605 .address_bits = address_bits,
2606 .domid = DOMID_SELF
2607 }
2608 };
2609
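	/* The exchange must cover the same total number of pages in and out. */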
2610 BUG_ON(extents_in << order_in != extents_out << order_out);
2611
2612 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2613 success = (exchange.nr_exchanged == extents_in);
2614
2615 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2616 BUG_ON(success && (rc != 0));
2617
2618 return success;
2619}
2620
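/*
 * Swap the 2^order machine frames backing the region at pstart for a single
 * machine-contiguous extent that satisfies the address_bits restriction
 * (e.g. below 4GB for 32-bit DMA).  On success the machine address of the
 * start of the new extent is returned in *dma_handle.
 */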
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002621int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002622 unsigned int address_bits,
2623 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002624{
2625 unsigned long *in_frames = discontig_frames, out_frame;
2626 unsigned long flags;
2627 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002628 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002629
2630 /*
2631 * Currently an auto-translated guest will not perform I/O, nor will
2632 * it require PAE page directories below 4GB. Therefore any calls to
2633 * this function are redundant and can be ignored.
2634 */
2635
2636 if (xen_feature(XENFEAT_auto_translated_physmap))
2637 return 0;
2638
2639 if (unlikely(order > MAX_CONTIG_ORDER))
2640 return -ENOMEM;
2641
2642 memset((void *) vstart, 0, PAGE_SIZE << order);
2643
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002644 spin_lock_irqsave(&xen_reservation_lock, flags);
2645
2646 /* 1. Zap current PTEs, remembering MFNs. */
2647 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2648
2649 /* 2. Get a new contiguous memory extent. */
2650 out_frame = virt_to_pfn(vstart);
2651 success = xen_exchange_memory(1UL << order, 0, in_frames,
2652 1, order, &out_frame,
2653 address_bits);
2654
2655 /* 3. Map the new extent in place of old pages. */
2656 if (success)
2657 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2658 else
2659 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2660
2661 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2662
Stefano Stabellini69908902013-10-09 16:56:32 +00002663 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002664 return success ? 0 : -ENOMEM;
2665}
2666EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2667
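/*
 * Undo xen_create_contiguous_region(): hand the machine-contiguous extent
 * backing the region at pstart back to the hypervisor in exchange for
 * ordinary, possibly discontiguous, frames.
 */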
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002668void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002669{
2670 unsigned long *out_frames = discontig_frames, in_frame;
2671 unsigned long flags;
2672 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002673 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002674
2675 if (xen_feature(XENFEAT_auto_translated_physmap))
2676 return;
2677
2678 if (unlikely(order > MAX_CONTIG_ORDER))
2679 return;
2680
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002681 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002682 memset((void *) vstart, 0, PAGE_SIZE << order);
2683
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002684 spin_lock_irqsave(&xen_reservation_lock, flags);
2685
2686 /* 1. Find start MFN of contiguous extent. */
2687 in_frame = virt_to_mfn(vstart);
2688
2689 /* 2. Zap current PTEs. */
2690 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2691
2692 /* 3. Do the exchange for non-contiguous MFNs. */
2693 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2694 0, out_frames, 0);
2695
2696 /* 4. Map new pages in place of old pages. */
2697 if (success)
2698 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2699 else
2700 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2701
2702 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2703}
2704EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2705
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002706#ifdef CONFIG_XEN_PVHVM
Olaf Hering34b6f012012-10-01 21:18:01 +02002707#ifdef CONFIG_PROC_VMCORE
2708/*
2709 * This function is used in two contexts:
2710 * - the kdump kernel has to check whether a pfn of the crashed kernel
2711 * was a ballooned page. vmcore uses this function to decide
2712 * whether to access a pfn of the crashed kernel.
2713 * - the kexec kernel has to check whether a pfn was ballooned by the
2714 * previous kernel. If the pfn is ballooned, handle it properly.
2715 * Returns 0 if the pfn is not backed by a RAM page; the caller may
2716 * handle the pfn specially in this case.
2717 */
2718static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2719{
2720 struct xen_hvm_get_mem_type a = {
2721 .domid = DOMID_SELF,
2722 .pfn = pfn,
2723 };
2724 int ram;
2725
2726 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2727 return -ENXIO;
2728
2729 switch (a.mem_type) {
2730 case HVMMEM_mmio_dm:
2731 ram = 0;
2732 break;
2733 case HVMMEM_ram_rw:
2734 case HVMMEM_ram_ro:
2735 default:
2736 ram = 1;
2737 break;
2738 }
2739
2740 return ram;
2741}
2742#endif
2743
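/*
 * Tell the hypervisor that this mm's top-level pagetable is going away
 * (HVMOP_pagetable_dying), so it can drop any shadow state it keeps for it.
 */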
Stefano Stabellini59151002010-06-17 14:22:52 +01002744static void xen_hvm_exit_mmap(struct mm_struct *mm)
2745{
2746 struct xen_hvm_pagetable_dying a;
2747 int rc;
2748
2749 a.domid = DOMID_SELF;
2750 a.gpa = __pa(mm->pgd);
2751 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2752 WARN_ON_ONCE(rc < 0);
2753}
2754
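/*
 * Probe for HVMOP_pagetable_dying by issuing it against gpa 0; an error
 * return means the hypervisor does not support it.
 */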
2755static int is_pagetable_dying_supported(void)
2756{
2757 struct xen_hvm_pagetable_dying a;
2758 int rc = 0;
2759
2760 a.domid = DOMID_SELF;
2761 a.gpa = 0x00;
2762 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2763 if (rc < 0) {
2764 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2765 return 0;
2766 }
2767 return 1;
2768}
2769
2770void __init xen_hvm_init_mmu_ops(void)
2771{
2772 if (is_pagetable_dying_supported())
2773 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
Olaf Hering34b6f012012-10-01 21:18:01 +02002774#ifdef CONFIG_PROC_VMCORE
2775 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2776#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002777}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002778#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002779
Ian Campbellde1ef202009-05-21 10:09:46 +01002780#define REMAP_BATCH_SIZE 16
2781
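/*
 * State shared with the apply_to_page_range() callback below.  PTE updates
 * are queued in mmu_update and issued to the hypervisor in batches of up to
 * REMAP_BATCH_SIZE.  For a contiguous mapping, mfn points at a single frame
 * number that is incremented per page; otherwise it walks an array with one
 * frame number per page.
 */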
2782struct remap_data {
David Vrabel4e8c0c82015-03-11 14:49:57 +00002783 xen_pfn_t *mfn;
2784 bool contiguous;
Ian Campbellde1ef202009-05-21 10:09:46 +01002785 pgprot_t prot;
2786 struct mmu_update *mmu_update;
2787};
2788
2789static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2790 unsigned long addr, void *data)
2791{
2792 struct remap_data *rmd = data;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002793 pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
2794
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08002795 /* For a contiguous range, increment the mfn value itself; otherwise
David Vrabel4e8c0c82015-03-11 14:49:57 +00002796 advance the pointer to the next mfn in the array. */
2797 if (rmd->contiguous)
2798 (*rmd->mfn)++;
2799 else
2800 rmd->mfn++;
Ian Campbellde1ef202009-05-21 10:09:46 +01002801
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002802 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002803 rmd->mmu_update->val = pte_val_ma(pte);
2804 rmd->mmu_update++;
2805
2806 return 0;
2807}
2808
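/*
 * Map nr foreign frames into vma at addr.  The PTE updates are built with
 * apply_to_page_range() and issued with HYPERVISOR_mmu_update() in chunks
 * of REMAP_BATCH_SIZE.  When err_ptr is non-NULL, gfn is treated as an
 * array (one frame per page), per-frame errors are recorded and mapping
 * continues; when it is NULL, gfn is a single starting frame mapped
 * contiguously and the first error aborts.  Returns the number of pages
 * mapped, or a negative error code.
 */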
Julien Gralla13d7202015-08-07 17:34:41 +01002809static int do_remap_gfn(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002810 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002811 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002812 int *err_ptr, pgprot_t prot,
2813 unsigned domid,
2814 struct page **pages)
Ian Campbellde1ef202009-05-21 10:09:46 +01002815{
David Vrabel4e8c0c82015-03-11 14:49:57 +00002816 int err = 0;
Ian Campbellde1ef202009-05-21 10:09:46 +01002817 struct remap_data rmd;
2818 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
Ian Campbellde1ef202009-05-21 10:09:46 +01002819 unsigned long range;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002820 int mapped = 0;
Ian Campbellde1ef202009-05-21 10:09:46 +01002821
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07002822 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002823
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002824 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2825#ifdef CONFIG_XEN_PVH
2826 /* We need to update the local page tables and the xen HAP */
Julien Gralla13d7202015-08-07 17:34:41 +01002827 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002828 prot, domid, pages);
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002829#else
2830 return -EINVAL;
2831#endif
2832 }
2833
Julien Gralla13d7202015-08-07 17:34:41 +01002834 rmd.mfn = gfn;
Ian Campbellde1ef202009-05-21 10:09:46 +01002835 rmd.prot = prot;
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08002836 /* We use err_ptr to indicate whether we are doing a contiguous
David Vrabel4e8c0c82015-03-11 14:49:57 +00002837 * mapping or a discontiguous mapping. */
2838 rmd.contiguous = !err_ptr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002839
2840 while (nr) {
David Vrabel4e8c0c82015-03-11 14:49:57 +00002841 int index = 0;
2842 int done = 0;
2843 int batch = min(REMAP_BATCH_SIZE, nr);
2844 int batch_left = batch;
Ian Campbellde1ef202009-05-21 10:09:46 +01002845 range = (unsigned long)batch << PAGE_SHIFT;
2846
2847 rmd.mmu_update = mmu_update;
2848 err = apply_to_page_range(vma->vm_mm, addr, range,
2849 remap_area_mfn_pte_fn, &rmd);
2850 if (err)
2851 goto out;
2852
David Vrabel4e8c0c82015-03-11 14:49:57 +00002853 /* Record the error for each page that fails, but continue
2854 * mapping until the whole set is done. */
2855 do {
2856 int i;
2857
2858 err = HYPERVISOR_mmu_update(&mmu_update[index],
2859 batch_left, &done, domid);
2860
2861 /*
Julien Gralla13d7202015-08-07 17:34:41 +01002862 * @err_ptr may be the same buffer as @gfn, so
2863 * only clear it after each chunk of @gfn is
David Vrabel4e8c0c82015-03-11 14:49:57 +00002864 * used.
2865 */
2866 if (err_ptr) {
2867 for (i = index; i < index + done; i++)
2868 err_ptr[i] = 0;
2869 }
2870 if (err < 0) {
2871 if (!err_ptr)
2872 goto out;
2873 err_ptr[i] = err;
2874 done++; /* Skip failed frame. */
2875 } else
2876 mapped += done;
2877 batch_left -= done;
2878 index += done;
2879 } while (batch_left);
Ian Campbellde1ef202009-05-21 10:09:46 +01002880
2881 nr -= batch;
2882 addr += range;
David Vrabel4e8c0c82015-03-11 14:49:57 +00002883 if (err_ptr)
2884 err_ptr += batch;
David Vrabel914beb92015-10-28 13:39:05 +00002885 cond_resched();
Ian Campbellde1ef202009-05-21 10:09:46 +01002886 }
Ian Campbellde1ef202009-05-21 10:09:46 +01002887out:
2888
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04002889 xen_flush_tlb_all();
Ian Campbellde1ef202009-05-21 10:09:46 +01002890
David Vrabel4e8c0c82015-03-11 14:49:57 +00002891 return err < 0 ? err : mapped;
2892}
2893
Julien Gralla13d7202015-08-07 17:34:41 +01002894int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002895 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002896 xen_pfn_t gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002897 pgprot_t prot, unsigned domid,
2898 struct page **pages)
2899{
Julien Gralla13d7202015-08-07 17:34:41 +01002900 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
Ian Campbellde1ef202009-05-21 10:09:46 +01002901}
Julien Gralla13d7202015-08-07 17:34:41 +01002902EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
Ian Campbell9a032e32012-10-17 13:37:49 -07002903
Julien Gralla13d7202015-08-07 17:34:41 +01002904int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002905 unsigned long addr,
Julien Gralla13d7202015-08-07 17:34:41 +01002906 xen_pfn_t *gfn, int nr,
David Vrabel4e8c0c82015-03-11 14:49:57 +00002907 int *err_ptr, pgprot_t prot,
2908 unsigned domid, struct page **pages)
2909{
2910 /* We BUG_ON() because it is a programmer error to pass a NULL err_ptr;
2911 * the eventual failure would be quite hard to debug, as it is difficult
2912 * to tell what actually caused the wrong memory to be mapped in.
2913 */
2914 BUG_ON(err_ptr == NULL);
Julien Gralla13d7202015-08-07 17:34:41 +01002915 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
David Vrabel4e8c0c82015-03-11 14:49:57 +00002916}
Julien Gralla13d7202015-08-07 17:34:41 +01002917EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
David Vrabel4e8c0c82015-03-11 14:49:57 +00002918
2919
Ian Campbell9a032e32012-10-17 13:37:49 -07002920/* Returns: 0 success */
Julien Gralla13d7202015-08-07 17:34:41 +01002921int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
Ian Campbell9a032e32012-10-17 13:37:49 -07002922 int numpgs, struct page **pages)
2923{
2924 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2925 return 0;
2926
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002927#ifdef CONFIG_XEN_PVH
David Vrabel628c28e2015-03-11 14:49:56 +00002928 return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002929#else
Ian Campbell9a032e32012-10-17 13:37:49 -07002930 return -EINVAL;
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002931#endif
Ian Campbell9a032e32012-10-17 13:37:49 -07002932}
Julien Gralla13d7202015-08-07 17:34:41 +01002933EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);