/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which would end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
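/*
 * Illustrative note (not part of the original file): for a guest pfn p
 * whose machine frame is m = pfn_to_mfn(p), the pte actually handed to
 * Xen is the one built by mfn_pte(m, prot); reading it back through
 * pte_val() applies the inverse mfn_to_pfn() translation, so generic
 * kernel code only ever sees pfns.  pte_pfn_to_mfn()/pte_mfn_to_pfn()
 * below implement this round trip.
 */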
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

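/*
 * Illustrative sketch (not in the original file): per the note above, a
 * hypothetical helper that inspects another vcpu's pagetable base should
 * read xen_current_cr3, which is only updated once the set-cr3 hypercall
 * has completed, rather than xen_cr3.
 */
static inline unsigned long xen_peer_cr3(int cpu)
{
	return per_cpu(xen_current_cr3, cpu);
}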

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

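/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * may hold a pointer outside the linear map (e.g. an ioremapped or fixmap
 * address) build hypercall arguments via this helper, as the pmd/pud
 * setters below do:
 *
 *	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 */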
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
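/*
 * Concrete example of the asymmetry noted above (not part of the original
 * file): a pfn with no machine frame (INVALID_P2M_ENTRY) is converted to
 * the all-zero, non-present pte; converting that pte back yields pfn 0
 * with no flags rather than the original pfn.
 */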

__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      rsv    UC-
 * 7    PAT PCD PWT      UC       rsv    UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}
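/*
 * Illustrative decoding (not part of the original file): reading the
 * constant byte by byte from entry 0 upwards, 0x0007010600070106 is
 * WB, WC, UC-, UC repeated for the upper four entries - i.e. the
 * "UC UC- WC WB" layout (entries 3..0) that the comment above expects.
 */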

__visible pte_t xen_make_pte(pteval_t pte)
{
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot, before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

Attilio Raoc7112882012-08-21 21:22:40 +01001139static void xen_post_allocator_init(void);
1140
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001141#ifdef CONFIG_X86_64
1142static void __init xen_cleanhighmap(unsigned long vaddr,
1143 unsigned long vaddr_end)
1144{
1145 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1146 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1147
1148 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1149 * We include the PMD passed in on _both_ boundaries. */
1150 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1151 pmd++, vaddr += PMD_SIZE) {
1152 if (pmd_none(*pmd))
1153 continue;
1154 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1155 set_pmd(pmd, __pmd(0));
1156 }
1157 /* In case we did something silly, we should crash in this function
1158 * instead of somewhere later where it would be confusing. */
1159 xen_mc_flush();
1160}
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001161static void __init xen_pagetable_p2m_copy(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001162{
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001163 unsigned long size;
1164 unsigned long addr;
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001165 unsigned long new_mfn_list;
1166
1167 if (xen_feature(XENFEAT_auto_translated_physmap))
1168 return;
1169
1170 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1171
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001172 new_mfn_list = xen_revector_p2m_tree();
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001173 /* No memory or already called. */
1174 if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001175 return;
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001176
Konrad Rzeszutek Wilkb621e152014-01-03 14:08:39 -05001177 /* using the __ka address and filling it with INVALID_P2M_ENTRY! */
1178 memset((void *)xen_start_info->mfn_list, 0xff, size);
1179
1180 /* We should be in __ka space. */
1181 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1182 addr = xen_start_info->mfn_list;
1183 /* We round up to the PMD, which means that if anybody at this stage is
1184 * using the __ka address of xen_start_info or xen_start_info->shared_info
1185 * they are going to crash. Fortunately we have already revectored
1186 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
1187 size = roundup(size, PMD_SIZE);
1188 xen_cleanhighmap(addr, addr + size);
1189
1190 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1191 memblock_free(__pa(xen_start_info->mfn_list), size);
1192 /* And revector! Bye bye old array */
1193 xen_start_info->mfn_list = new_mfn_list;
1194
Konrad Rzeszutek Wilk3aca7fb2012-08-14 14:34:00 -04001195 /* At this stage, cleanup_highmap has already cleaned __ka space
1196 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1197 * the ramdisk). We continue on, erasing PMD entries that point to page
1198 * tables - do note that they are accessible at this stage via __va.
1199 * For good measure we also round up to the PMD - which means that if
1200 * anybody is using the __ka address of the initial boot-stack and tries
1201 * to use it, they are going to crash. The xen_start_info has been
1202 * taken care of already in xen_setup_kernel_pagetable. */
1203 addr = xen_start_info->pt_base;
1204 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1205
1206 xen_cleanhighmap(addr, addr + size);
1207 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1208#ifdef DEBUG
1209 /* This is superfluous and not necessary, but you know what,
1210 * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
1211 * anything at this stage. */
1212 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1213#endif
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001214}
1215#endif
1216
1217static void __init xen_pagetable_init(void)
1218{
1219 paging_init();
Konrad Rzeszutek Wilk32df75c2013-12-31 12:37:52 -05001220#ifdef CONFIG_X86_64
1221 xen_pagetable_p2m_copy();
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001222#endif
Juergen Gross2c185682014-10-14 13:33:46 +02001223 /* Allocate and initialize top and mid mfn levels for p2m structure */
1224 xen_build_mfn_list_list();
1225
1226 xen_setup_shared_info();
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001227 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001228}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001229static void xen_write_cr2(unsigned long cr2)
1230{
Alex Shi2113f462012-01-13 23:53:35 +08001231 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001232}
1233
1234static unsigned long xen_read_cr2(void)
1235{
Alex Shi2113f462012-01-13 23:53:35 +08001236 return this_cpu_read(xen_vcpu)->arch.cr2;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001237}
1238
1239unsigned long xen_read_cr2_direct(void)
1240{
Alex Shi2113f462012-01-13 23:53:35 +08001241 return this_cpu_read(xen_vcpu_info.arch.cr2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001242}
1243
Konrad Rzeszutek Wilk95a7d762012-10-31 12:38:31 -04001244void xen_flush_tlb_all(void)
1245{
1246 struct mmuext_op *op;
1247 struct multicall_space mcs;
1248
1249 trace_xen_mmu_flush_tlb_all(0);
1250
1251 preempt_disable();
1252
1253 mcs = xen_mc_entry(sizeof(*op));
1254
1255 op = mcs.args;
1256 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1257 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1258
1259 xen_mc_issue(PARAVIRT_LAZY_MMU);
1260
1261 preempt_enable();
1262}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001263static void xen_flush_tlb(void)
1264{
1265 struct mmuext_op *op;
1266 struct multicall_space mcs;
1267
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001268 trace_xen_mmu_flush_tlb(0);
1269
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001270 preempt_disable();
1271
1272 mcs = xen_mc_entry(sizeof(*op));
1273
1274 op = mcs.args;
1275 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1276 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1277
1278 xen_mc_issue(PARAVIRT_LAZY_MMU);
1279
1280 preempt_enable();
1281}
1282
1283static void xen_flush_tlb_single(unsigned long addr)
1284{
1285 struct mmuext_op *op;
1286 struct multicall_space mcs;
1287
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001288 trace_xen_mmu_flush_tlb_single(addr);
1289
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001290 preempt_disable();
1291
1292 mcs = xen_mc_entry(sizeof(*op));
1293 op = mcs.args;
1294 op->cmd = MMUEXT_INVLPG_LOCAL;
1295 op->arg1.linear_addr = addr & PAGE_MASK;
1296 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1297
1298 xen_mc_issue(PARAVIRT_LAZY_MMU);
1299
1300 preempt_enable();
1301}
1302
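/*
 * Cross-CPU TLB flush: rather than sending IPIs ourselves, we hand Xen a
 * vcpu mask and let it issue MMUEXT_TLB_FLUSH_MULTI (or INVLPG_MULTI for
 * a single-page range) on the vcpus that actually need it.
 */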
1303static void xen_flush_tlb_others(const struct cpumask *cpus,
Alex Shie7b52ff2012-06-28 09:02:17 +08001304 struct mm_struct *mm, unsigned long start,
1305 unsigned long end)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001306{
1307 struct {
1308 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001309#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001310 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001311#else
1312 DECLARE_BITMAP(mask, NR_CPUS);
1313#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001314 } *args;
1315 struct multicall_space mcs;
1316
Alex Shie7b52ff2012-06-28 09:02:17 +08001317 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001318
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001319 if (cpumask_empty(cpus))
1320 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001321
1322 mcs = xen_mc_entry(sizeof(*args));
1323 args = mcs.args;
1324 args->op.arg2.vcpumask = to_cpumask(args->mask);
1325
1326 /* Remove us, and any offline CPUs. */
1327 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1328 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001329
Alex Shie7b52ff2012-06-28 09:02:17 +08001330 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
Alex Shice7184b2012-08-24 08:55:13 +00001331 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001332 args->op.cmd = MMUEXT_INVLPG_MULTI;
Alex Shie7b52ff2012-06-28 09:02:17 +08001333 args->op.arg1.linear_addr = start;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001334 }
1335
1336 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1337
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001338 xen_mc_issue(PARAVIRT_LAZY_MMU);
1339}
1340
1341static unsigned long xen_read_cr3(void)
1342{
Alex Shi2113f462012-01-13 23:53:35 +08001343 return this_cpu_read(xen_cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001344}
1345
1346static void set_current_cr3(void *v)
1347{
Alex Shi2113f462012-01-13 23:53:35 +08001348 this_cpu_write(xen_current_cr3, (unsigned long)v);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001349}
1350
1351static void __xen_write_cr3(bool kernel, unsigned long cr3)
1352{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001353 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001354 unsigned long mfn;
1355
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001356 trace_xen_mmu_write_cr3(kernel, cr3);
1357
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001358 if (cr3)
1359 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1360 else
1361 mfn = 0;
1362
1363 WARN_ON(mfn == 0 && kernel);
1364
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001365 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1366 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001367
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001368 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001369
1370 if (kernel) {
Alex Shi2113f462012-01-13 23:53:35 +08001371 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001372
1373 /* Update xen_current_cr3 once the batch has actually
1374 been submitted. */
1375 xen_mc_callback(set_current_cr3, (void *)cr3);
1376 }
1377}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001378static void xen_write_cr3(unsigned long cr3)
1379{
1380 BUG_ON(preemptible());
1381
1382 xen_mc_batch(); /* disables interrupts */
1383
1384 /* Update while interrupts are disabled, so it's atomic with
1385 respect to ipis */
Alex Shi2113f462012-01-13 23:53:35 +08001386 this_cpu_write(xen_cr3, cr3);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001387
1388 __xen_write_cr3(true, cr3);
1389
1390#ifdef CONFIG_X86_64
1391 {
1392 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1393 if (user_pgd)
1394 __xen_write_cr3(false, __pa(user_pgd));
1395 else
1396 __xen_write_cr3(false, 0);
1397 }
1398#endif
1399
1400 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1401}
1402
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001403#ifdef CONFIG_X86_64
1404/*
1405 * At the start of the day - when Xen launches a guest, it has already
1406 * built pagetables for the guest. We diligently look over them
1407 * in xen_setup_kernel_pagetable and graft them as appropriate into the
1408 * init_level4_pgt and its friends. Then when we are happy we load
1409 * the new init_level4_pgt - and continue on.
1410 *
1411 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1412 * up the rest of the pagetables. When it has completed it loads the cr3.
1413 * N.B. that baremetal would start at 'start_kernel' (and the early
1414 * #PF handler would create bootstrap pagetables) - so we are running
1415 * under the same assumptions about what to do when write_cr3 is executed
1416 * at this point.
1417 *
1418 * Since there are no user-page tables at all, we have two variants
1419 * of xen_write_cr3 - the early bootup (this one), and the late one
1420 * (xen_write_cr3). The reason we have to do that is that in 64-bit
1421 * the Linux kernel and user-space are both in ring 3 while the
1422 * hypervisor is in ring 0.
1423 */
1424static void __init xen_write_cr3_init(unsigned long cr3)
1425{
1426 BUG_ON(preemptible());
1427
1428 xen_mc_batch(); /* disables interrupts */
1429
1430 /* Update while interrupts are disabled, so it's atomic with
1431 respect to ipis */
1432 this_cpu_write(xen_cr3, cr3);
1433
1434 __xen_write_cr3(true, cr3);
1435
1436 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
Konrad Rzeszutek Wilk0cc91292013-02-22 17:35:13 -08001437}
1438#endif
1439
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001440static int xen_pgd_alloc(struct mm_struct *mm)
1441{
1442 pgd_t *pgd = mm->pgd;
1443 int ret = 0;
1444
1445 BUG_ON(PagePinned(virt_to_page(pgd)));
1446
1447#ifdef CONFIG_X86_64
1448 {
1449 struct page *page = virt_to_page(pgd);
1450 pgd_t *user_pgd;
1451
1452 BUG_ON(page->private != 0);
1453
1454 ret = -ENOMEM;
1455
1456 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1457 page->private = (unsigned long)user_pgd;
1458
1459 if (user_pgd != NULL) {
Andy Lutomirskif40c3302014-05-05 12:19:36 -07001460 user_pgd[pgd_index(VSYSCALL_ADDR)] =
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001461 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1462 ret = 0;
1463 }
1464
1465 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1466 }
1467#endif
1468
1469 return ret;
1470}
1471
1472static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1473{
1474#ifdef CONFIG_X86_64
1475 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1476
1477 if (user_pgd)
1478 free_page((unsigned long)user_pgd);
1479#endif
1480}
1481
Stefano Stabelliniee176452011-04-19 14:47:31 +01001482#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001483static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001484{
1485 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1486 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1487 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1488 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001489
1490 return pte;
1491}
1492#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001493static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001494{
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001495 return pte;
1496}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001497#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001498
David Vrabeld095d432012-07-09 11:39:05 +01001499/*
1500 * Init-time set_pte while constructing initial pagetables, which
1501 * doesn't allow RO page table pages to be remapped RW.
1502 *
David Vrabel66a27dd2012-07-09 11:39:06 +01001503 * If there is no MFN for this PFN then this page is initially
1504 * ballooned out so clear the PTE (as in decrease_reservation() in
1505 * drivers/xen/balloon.c).
1506 *
David Vrabeld095d432012-07-09 11:39:05 +01001507 * Many of these PTE updates are done on unpinned and writable pages
1508 * and doing a hypercall for these is unnecessary and expensive. At
1509 * this point it is not possible to tell if a page is pinned or not,
1510 * so always write the PTE directly and rely on Xen trapping and
1511 * emulating any updates as necessary.
1512 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001513static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001514{
David Vrabel66a27dd2012-07-09 11:39:06 +01001515 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1516 pte = mask_rw_pte(ptep, pte);
1517 else
1518 pte = __pte_ma(0);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001519
David Vrabeld095d432012-07-09 11:39:05 +01001520 native_set_pte(ptep, pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001521}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001522
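/* Synchronous (non-batched) pin or unpin of a single pagetable frame. */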
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001523static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1524{
1525 struct mmuext_op op;
1526 op.cmd = cmd;
1527 op.arg1.mfn = pfn_to_mfn(pfn);
1528 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1529 BUG();
1530}
1531
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001532/* Early in boot, while setting up the initial pagetable, assume
1533 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001534static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001535{
1536#ifdef CONFIG_FLATMEM
1537 BUG_ON(mem_map); /* should only be used early */
1538#endif
1539 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001540 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1541}
1542
1543/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001544static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001545{
1546#ifdef CONFIG_FLATMEM
1547 BUG_ON(mem_map); /* should only be used early */
1548#endif
1549 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001550}
1551
1552/* Early release_pte assumes that all pts are pinned, since there's
1553 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001554static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001555{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001556 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001557 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1558}
1559
Daniel Kiper3f5089532011-05-12 17:19:53 -04001560static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001561{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001562 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001563}
1564
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001565static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1566{
1567 struct multicall_space mcs;
1568 struct mmuext_op *op;
1569
1570 mcs = __xen_mc_entry(sizeof(*op));
1571 op = mcs.args;
1572 op->cmd = cmd;
1573 op->arg1.mfn = pfn_to_mfn(pfn);
1574
1575 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1576}
1577
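/* Queue a protection change for the pfn's direct (lowmem) mapping. */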
1578static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1579{
1580 struct multicall_space mcs;
1581 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1582
1583 mcs = __xen_mc_entry(0);
1584 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1585 pfn_pte(pfn, prot), 0);
1586}
1587
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001588/* This needs to make sure the new pte page is pinned iff it's being
1589 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001590static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1591 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001592{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001593 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001594
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001595 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001596
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001597 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001598 struct page *page = pfn_to_page(pfn);
1599
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001600 SetPagePinned(page);
1601
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001602 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001603 xen_mc_batch();
1604
1605 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1606
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001607 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001608 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1609
1610 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001611 } else {
1612 /* make sure there are no stray mappings of
1613 this page */
1614 kmap_flush_unused();
1615 }
1616 }
1617}
1618
1619static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1620{
1621 xen_alloc_ptpage(mm, pfn, PT_PTE);
1622}
1623
1624static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1625{
1626 xen_alloc_ptpage(mm, pfn, PT_PMD);
1627}
1628
1629/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001630static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001631{
1632 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001633 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001634
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001635 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1636
1637 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001638 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001639 xen_mc_batch();
1640
Kirill A. Shutemov57c1ffc2013-11-14 14:30:45 -08001641 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001642 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1643
1644 __set_pfn_prot(pfn, PAGE_KERNEL);
1645
1646 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001647 }
1648 ClearPagePinned(page);
1649 }
1650}
1651
1652static void xen_release_pte(unsigned long pfn)
1653{
1654 xen_release_ptpage(pfn, PT_PTE);
1655}
1656
1657static void xen_release_pmd(unsigned long pfn)
1658{
1659 xen_release_ptpage(pfn, PT_PMD);
1660}
1661
1662#if PAGETABLE_LEVELS == 4
1663static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1664{
1665 xen_alloc_ptpage(mm, pfn, PT_PUD);
1666}
1667
1668static void xen_release_pud(unsigned long pfn)
1669{
1670 xen_release_ptpage(pfn, PT_PUD);
1671}
1672#endif
1673
1674void __init xen_reserve_top(void)
1675{
1676#ifdef CONFIG_X86_32
1677 unsigned long top = HYPERVISOR_VIRT_START;
1678 struct xen_platform_parameters pp;
1679
1680 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1681 top = pp.virt_start;
1682
1683 reserve_top_address(-top);
1684#endif /* CONFIG_X86_32 */
1685}
1686
1687/*
1688 * Like __va(), but returns address in the kernel mapping (which is
1689 * all we have until the physical memory mapping has been set up).
1690 */
1691static void *__ka(phys_addr_t paddr)
1692{
1693#ifdef CONFIG_X86_64
1694 return (void *)(paddr + __START_KERNEL_map);
1695#else
1696 return __va(paddr);
1697#endif
1698}
1699
1700/* Convert a machine address to physical address */
1701static unsigned long m2p(phys_addr_t maddr)
1702{
1703 phys_addr_t paddr;
1704
1705 maddr &= PTE_PFN_MASK;
1706 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1707
1708 return paddr;
1709}
1710
1711/* Convert a machine address to kernel virtual */
1712static void *m2v(phys_addr_t maddr)
1713{
1714 return __ka(m2p(maddr));
1715}
1716
Juan Quintela4ec53872010-09-02 15:45:43 +01001717/* Set the page permissions on identity-mapped pages */
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001718static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001719{
1720 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1721 pte_t pte = pfn_pte(pfn, prot);
1722
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001723 /* For PVH there is no need to set pages R/O or R/W in order to pin or unpin them. */
1724 if (xen_feature(XENFEAT_auto_translated_physmap))
1725 return;
1726
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001727 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001728 BUG();
1729}
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001730static void set_page_prot(void *addr, pgprot_t prot)
1731{
1732 return set_page_prot_flags(addr, prot, UVMF_NONE);
1733}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001734#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001735static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001736{
1737 unsigned pmdidx, pteidx;
1738 unsigned ident_pte;
1739 unsigned long pfn;
1740
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001741 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1742 PAGE_SIZE);
1743
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001744 ident_pte = 0;
1745 pfn = 0;
1746 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1747 pte_t *pte_page;
1748
1749 /* Reuse or allocate a page of ptes */
1750 if (pmd_present(pmd[pmdidx]))
1751 pte_page = m2v(pmd[pmdidx].pmd);
1752 else {
1753 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001754 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001755 break;
1756
1757 pte_page = &level1_ident_pgt[ident_pte];
1758 ident_pte += PTRS_PER_PTE;
1759
1760 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1761 }
1762
1763 /* Install mappings */
1764 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1765 pte_t pte;
1766
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001767#ifdef CONFIG_X86_32
1768 if (pfn > max_pfn_mapped)
1769 max_pfn_mapped = pfn;
1770#endif
1771
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001772 if (!pte_none(pte_page[pteidx]))
1773 continue;
1774
1775 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1776 pte_page[pteidx] = pte;
1777 }
1778 }
1779
1780 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1781 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1782
1783 set_page_prot(pmd, PAGE_KERNEL_RO);
1784}
Konrad Rzeszutek Wilkcaaf9ec2012-07-12 13:59:36 -04001785#endif
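/*
 * Ask Xen where the machine-to-physical table is mapped and how many
 * entries it has; fall back to the default MACH2PHYS_NR_ENTRIES if the
 * XENMEM_machphys_mapping hypercall is not available.
 */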
Ian Campbell7e775062010-09-30 12:37:26 +01001786void __init xen_setup_machphys_mapping(void)
1787{
1788 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001789
1790 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1791 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001792 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001793 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001794 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001795 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001796#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001797 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1798 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001799#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001800}
1801
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001802#ifdef CONFIG_X86_64
1803static void convert_pfn_mfn(void *v)
1804{
1805 pte_t *pte = v;
1806 int i;
1807
1808 /* All levels are converted the same way, so just treat them
1809 as ptes. */
1810 for (i = 0; i < PTRS_PER_PTE; i++)
1811 pte[i] = xen_make_pte(pte[i].pte);
1812}
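/*
 * If the frame at either end of the Xen-provided pagetable region is the
 * given page, make it RW, clear it and shrink the region, so that frame is
 * left out of the later memblock reservation and can be reused.
 */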
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001813static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1814 unsigned long addr)
1815{
1816 if (*pt_base == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001817 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001818 clear_page((void *)addr);
1819 (*pt_base)++;
1820 }
1821 if (*pt_end == PFN_DOWN(__pa(addr))) {
Konrad Rzeszutek Wilkb2222792013-03-29 10:20:56 -04001822 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001823 clear_page((void *)addr);
1824 (*pt_end)--;
1825 }
1826}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001827/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001828 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001829 *
1830 * We can construct this by grafting the Xen provided pagetable into
1831 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
Stefan Bader0b5a5062014-09-02 11:16:01 +01001832 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
1833 * kernel has a physical mapping to start with - but that's enough to
1834 * get __va working. We need to fill in the rest of the physical
1835 * mapping once some sort of allocator has been set up. NOTE: for
1836 * PVH, the page tables are native.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001837 */
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001838void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001839{
1840 pud_t *l3;
1841 pmd_t *l2;
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001842 unsigned long addr[3];
1843 unsigned long pt_base, pt_end;
1844 unsigned i;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001845
Stefano Stabellini14988a42011-02-18 11:32:40 +00001846 /* max_pfn_mapped is the last pfn mapped in the initial memory
1847 * mappings. Considering that on Xen after the kernel mappings we
1848 * have the mappings of some pages that don't exist in pfn space, we
1849 * set max_pfn_mapped to the last real pfn mapped. */
1850 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1851
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001852 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1853 pt_end = pt_base + xen_start_info->nr_pt_frames;
1854
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001855 /* Zap identity mapping */
1856 init_level4_pgt[0] = __pgd(0);
1857
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001858 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1859 /* Pre-constructed entries are in pfn, so convert to mfn */
1860 /* L4[272] -> level3_ident_pgt
1861 * L4[511] -> level3_kernel_pgt */
1862 convert_pfn_mfn(init_level4_pgt);
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001863
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001864 /* L3_i[0] -> level2_ident_pgt */
1865 convert_pfn_mfn(level3_ident_pgt);
1866 /* L3_k[510] -> level2_kernel_pgt
Stefan Bader0b5a5062014-09-02 11:16:01 +01001867 * L3_k[511] -> level2_fixmap_pgt */
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001868 convert_pfn_mfn(level3_kernel_pgt);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001869
1870 /* L3_k[511][506] -> level1_fixmap_pgt */
1871 convert_pfn_mfn(level2_fixmap_pgt);
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001872 }
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001873 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001874 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1875 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1876
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001877 addr[0] = (unsigned long)pgd;
1878 addr[1] = (unsigned long)l3;
1879 addr[2] = (unsigned long)l2;
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001880 /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
Stefan Bader0b5a5062014-09-02 11:16:01 +01001881 * Both L4[272][0] and L4[511][510] have entries that point to the same
Konrad Rzeszutek Wilk4fac1532012-07-12 13:55:25 -04001882 * L2 (PMD) tables. Meaning that if you modify it in __va space
1883 * it will be also modified in the __ka space! (But if you just
1884 * modify the PMD table to point to other PTE's or none, then you
1885 * are OK - which is what cleanup_highmap does) */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001886 copy_page(level2_ident_pgt, l2);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001887 /* Graft it onto L4[511][510] */
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001888 copy_page(level2_kernel_pgt, l2);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001889
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001890 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1891 /* Make pagetable pieces RO */
1892 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1893 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1894 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1895 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1896 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1897 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1898 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
Stefan Bader0b5a5062014-09-02 11:16:01 +01001899 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001900
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001901 /* Pin down new L4 */
1902 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1903 PFN_DOWN(__pa_symbol(init_level4_pgt)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001904
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001905 /* Unpin Xen-provided one */
1906 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001907
Mukesh Rathor4e44e442013-12-31 12:41:27 -05001908 /*
1909 * At this stage there can be no user pgd, and no page
1910 * structure to attach it to, so make sure we just set kernel
1911 * pgd.
1912 */
1913 xen_mc_batch();
1914 __xen_write_cr3(true, __pa(init_level4_pgt));
1915 xen_mc_issue(PARAVIRT_LAZY_CPU);
1916 } else
1917 native_write_cr3(__pa(init_level4_pgt));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001918
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001919 /* We can't easily rip out L3 and L2, as the Xen pagetables are
1920 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
1921 * the initial domain. For guests using the toolstack, they are in:
1922 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
1923 * rip out the [L4] (pgd), but for guests we shave off three pages.
1924 */
1925 for (i = 0; i < ARRAY_SIZE(addr); i++)
1926 check_pt_base(&pt_base, &pt_end, addr[i]);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001927
Konrad Rzeszutek Wilk488f0462012-07-26 12:00:56 -04001928 /* Our (by three pages) smaller Xen pagetable that we are using */
1929 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
Konrad Rzeszutek Wilk7f914062012-07-26 12:47:40 -04001930 /* Revector the xen_start_info */
1931 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001932}
1933#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001934static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1935static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1936
Daniel Kiper3f5089532011-05-12 17:19:53 -04001937static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001938{
1939 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1940
1941 BUG_ON(read_cr3() != __pa(initial_page_table));
1942 BUG_ON(cr3 != __pa(swapper_pg_dir));
1943
1944 /*
1945 * We are switching to swapper_pg_dir for the first time (from
1946 * initial_page_table) and therefore need to mark that page
1947 * read-only and then pin it.
1948 *
1949 * Xen disallows sharing of kernel PMDs for PAE
1950 * guests. Therefore we must copy the kernel PMD from
1951 * initial_page_table into a new kernel PMD to be used in
1952 * swapper_pg_dir.
1953 */
1954 swapper_kernel_pmd =
1955 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001956 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001957 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1958 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1959 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1960
1961 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1962 xen_write_cr3(cr3);
1963 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1964
1965 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1966 PFN_DOWN(__pa(initial_page_table)));
1967 set_page_prot(initial_page_table, PAGE_KERNEL);
1968 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1969
1970 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1971}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001972
Konrad Rzeszutek Wilk3699aad2012-06-28 22:47:35 -04001973void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001974{
1975 pmd_t *kernel_pmd;
1976
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001977 initial_kernel_pmd =
1978 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001979
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001980 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1981 xen_start_info->nr_pt_frames * PAGE_SIZE +
1982 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001983
1984 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001985 copy_page(initial_kernel_pmd, kernel_pmd);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001986
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001987 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001988
Konrad Rzeszutek Wilkae895ed2012-07-26 11:57:04 -04001989 copy_page(initial_page_table, pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001990 initial_page_table[KERNEL_PGD_BOUNDARY] =
1991 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001992
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001993 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1994 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001995 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1996
1997 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1998
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001999 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2000 PFN_DOWN(__pa(initial_page_table)));
2001 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002002
Tejun Heo24aa0782011-07-12 11:16:06 +02002003 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05002004 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002005}
2006#endif /* CONFIG_X86_64 */
2007
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002008static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2009
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07002010static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002011{
2012 pte_t pte;
2013
2014 phys >>= PAGE_SHIFT;
2015
2016 switch (idx) {
2017 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
Kees Cook4eefbe72013-04-10 12:24:22 -07002018 case FIX_RO_IDT:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002019#ifdef CONFIG_X86_32
2020 case FIX_WP_TEST:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002021# ifdef CONFIG_HIGHMEM
2022 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2023# endif
2024#else
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002025 case VSYSCALL_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002026#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08002027 case FIX_TEXT_POKE0:
2028 case FIX_TEXT_POKE1:
2029 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002030 pte = pfn_pte(phys, prot);
2031 break;
2032
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002033#ifdef CONFIG_X86_LOCAL_APIC
2034 case FIX_APIC_BASE: /* maps dummy local APIC */
2035 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2036 break;
2037#endif
2038
2039#ifdef CONFIG_X86_IO_APIC
2040 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2041 /*
2042 * We just don't map the IO APIC - all access is via
2043 * hypercalls. Keep the address in the pte for reference.
2044 */
Konrad Rzeszutek Wilk27abd142012-04-16 13:53:40 -04002045 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002046 break;
2047#endif
2048
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002049 case FIX_PARAVIRT_BOOTMAP:
2050 /* This is an MFN, but it isn't an IO mapping from the
2051 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002052 pte = mfn_pte(phys, prot);
2053 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002054
2055 default:
2056 /* By default, set_fixmap is used for hardware mappings */
David Vrabel7f2f8822014-01-08 14:01:01 +00002057 pte = mfn_pte(phys, prot);
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08002058 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002059 }
2060
2061 __native_set_fixmap(idx, pte);
2062
2063#ifdef CONFIG_X86_64
2064 /* Replicate changes to map the vsyscall page into the user
2065 pagetable vsyscall mapping. */
Andy Lutomirskif40c3302014-05-05 12:19:36 -07002066 if (idx == VSYSCALL_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002067 unsigned long vaddr = __fix_to_virt(idx);
2068 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2069 }
2070#endif
2071}
2072
Daniel Kiper3f5089532011-05-12 17:19:53 -04002073static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002074{
Mukesh Rathor4e44e442013-12-31 12:41:27 -05002075 if (xen_feature(XENFEAT_auto_translated_physmap))
2076 return;
2077
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002078 pv_mmu_ops.set_pte = xen_set_pte;
2079 pv_mmu_ops.set_pmd = xen_set_pmd;
2080 pv_mmu_ops.set_pud = xen_set_pud;
2081#if PAGETABLE_LEVELS == 4
2082 pv_mmu_ops.set_pgd = xen_set_pgd;
2083#endif
2084
2085 /* This will work as long as patching hasn't happened yet
2086 (which it hasn't) */
2087 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2088 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2089 pv_mmu_ops.release_pte = xen_release_pte;
2090 pv_mmu_ops.release_pmd = xen_release_pmd;
2091#if PAGETABLE_LEVELS == 4
2092 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2093 pv_mmu_ops.release_pud = xen_release_pud;
2094#endif
2095
2096#ifdef CONFIG_X86_64
Konrad Rzeszutek Wilkd3eb2c82013-03-22 10:34:28 -04002097 pv_mmu_ops.write_cr3 = &xen_write_cr3;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002098 SetPagePinned(virt_to_page(level3_user_vsyscall));
2099#endif
2100 xen_mark_init_mm_pinned();
2101}
2102
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002103static void xen_leave_lazy_mmu(void)
2104{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002105 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002106 xen_mc_flush();
2107 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002108 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002109}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002110
Daniel Kiper3f5089532011-05-12 17:19:53 -04002111static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002112 .read_cr2 = xen_read_cr2,
2113 .write_cr2 = xen_write_cr2,
2114
2115 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002116 .write_cr3 = xen_write_cr3_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002117
2118 .flush_tlb_user = xen_flush_tlb,
2119 .flush_tlb_kernel = xen_flush_tlb,
2120 .flush_tlb_single = xen_flush_tlb_single,
2121 .flush_tlb_others = xen_flush_tlb_others,
2122
2123 .pte_update = paravirt_nop,
2124 .pte_update_defer = paravirt_nop,
2125
2126 .pgd_alloc = xen_pgd_alloc,
2127 .pgd_free = xen_pgd_free,
2128
2129 .alloc_pte = xen_alloc_pte_init,
2130 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002131 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002132 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002133
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002134 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002135 .set_pte_at = xen_set_pte_at,
2136 .set_pmd = xen_set_pmd_hyper,
2137
2138 .ptep_modify_prot_start = __ptep_modify_prot_start,
2139 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2140
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002141 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2142 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002143
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002144 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2145 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002146
2147#ifdef CONFIG_X86_PAE
2148 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002149 .pte_clear = xen_pte_clear,
2150 .pmd_clear = xen_pmd_clear,
2151#endif /* CONFIG_X86_PAE */
2152 .set_pud = xen_set_pud_hyper,
2153
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002154 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2155 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002156
2157#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002158 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2159 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002160 .set_pgd = xen_set_pgd_hyper,
2161
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002162 .alloc_pud = xen_alloc_pmd_init,
2163 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002164#endif /* PAGETABLE_LEVELS == 4 */
2165
2166 .activate_mm = xen_activate_mm,
2167 .dup_mmap = xen_dup_mmap,
2168 .exit_mmap = xen_exit_mmap,
2169
2170 .lazy_mode = {
2171 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002172 .leave = xen_leave_lazy_mmu,
Boris Ostrovsky511ba862013-03-23 09:36:36 -04002173 .flush = paravirt_flush_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002174 },
2175
2176 .set_fixmap = xen_set_fixmap,
2177};
2178
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002179void __init xen_init_mmu_ops(void)
2180{
Attilio Rao7737b212012-08-21 21:22:38 +01002181 x86_init.paging.pagetable_init = xen_pagetable_init;
Mukesh Rathor76bccef2014-01-03 09:48:08 -05002182
2183 /* Optimization - we can use the HVM one but it has no idea which
2184 * VCPUs are descheduled - which means that it will needlessly IPI
2185 * them. Xen knows, so let it do the job.
2186 */
2187 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2188 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2189 return;
2190 }
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002191 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002192
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002193 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002194}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002195
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002196/* Protected by xen_reservation_lock. */
2197#define MAX_CONTIG_ORDER 9 /* 2MB */
2198static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2199
2200#define VOID_PTE (mfn_pte(0, __pgprot(0)))
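/*
 * Clear the PTEs covering a 2^order range of vaddrs, recording the old
 * MFNs in in_frames (and/or the PFNs in out_frames) and marking each PFN
 * INVALID_P2M_ENTRY so the underlying frames can be exchanged with Xen.
 */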
2201static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2202 unsigned long *in_frames,
2203 unsigned long *out_frames)
2204{
2205 int i;
2206 struct multicall_space mcs;
2207
2208 xen_mc_batch();
2209 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2210 mcs = __xen_mc_entry(0);
2211
2212 if (in_frames)
2213 in_frames[i] = virt_to_mfn(vaddr);
2214
2215 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002216 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002217
2218 if (out_frames)
2219 out_frames[i] = virt_to_pfn(vaddr);
2220 }
2221 xen_mc_issue(0);
2222}
2223
2224/*
2225 * Update the pfn-to-mfn mappings for a virtual address range, either to
2226 * point to an array of mfns, or contiguously from a single starting
2227 * mfn.
2228 */
2229static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2230 unsigned long *mfns,
2231 unsigned long first_mfn)
2232{
2233 unsigned i, limit;
2234 unsigned long mfn;
2235
2236 xen_mc_batch();
2237
2238 limit = 1u << order;
2239 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2240 struct multicall_space mcs;
2241 unsigned flags;
2242
2243 mcs = __xen_mc_entry(0);
2244 if (mfns)
2245 mfn = mfns[i];
2246 else
2247 mfn = first_mfn + i;
2248
2249 if (i < (limit - 1))
2250 flags = 0;
2251 else {
2252 if (order == 0)
2253 flags = UVMF_INVLPG | UVMF_ALL;
2254 else
2255 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2256 }
2257
2258 MULTI_update_va_mapping(mcs.mc, vaddr,
2259 mfn_pte(mfn, PAGE_KERNEL), flags);
2260
2261 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2262 }
2263
2264 xen_mc_issue(0);
2265}
2266
2267/*
2268 * Perform the hypercall to exchange a region of our pfns to point to
2269 * memory with the required contiguous alignment. Takes the pfns as
2270 * input, and populates mfns as output.
2271 *
2272 * Returns a success code indicating whether the hypervisor was able to
2273 * satisfy the request or not.
2274 */
2275static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2276 unsigned long *pfns_in,
2277 unsigned long extents_out,
2278 unsigned int order_out,
2279 unsigned long *mfns_out,
2280 unsigned int address_bits)
2281{
2282 long rc;
2283 int success;
2284
2285 struct xen_memory_exchange exchange = {
2286 .in = {
2287 .nr_extents = extents_in,
2288 .extent_order = order_in,
2289 .extent_start = pfns_in,
2290 .domid = DOMID_SELF
2291 },
2292 .out = {
2293 .nr_extents = extents_out,
2294 .extent_order = order_out,
2295 .extent_start = mfns_out,
2296 .address_bits = address_bits,
2297 .domid = DOMID_SELF
2298 }
2299 };
2300
2301 BUG_ON(extents_in << order_in != extents_out << order_out);
2302
2303 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2304 success = (exchange.nr_exchanged == extents_in);
2305
2306 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2307 BUG_ON(success && (rc != 0));
2308
2309 return success;
2310}
2311
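/*
 * Exchange the frames backing the 2^order pages at pstart for a single
 * machine-contiguous extent addressable within address_bits, returning the
 * machine address in *dma_handle (used, for example, by swiotlb-xen for
 * DMA-able buffers).
 */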
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002312int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
Stefano Stabellini69908902013-10-09 16:56:32 +00002313 unsigned int address_bits,
2314 dma_addr_t *dma_handle)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002315{
2316 unsigned long *in_frames = discontig_frames, out_frame;
2317 unsigned long flags;
2318 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002319 unsigned long vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002320
2321 /*
2322 * Currently an auto-translated guest will not perform I/O, nor will
2323 * it require PAE page directories below 4GB. Therefore any calls to
2324 * this function are redundant and can be ignored.
2325 */
2326
2327 if (xen_feature(XENFEAT_auto_translated_physmap))
2328 return 0;
2329
2330 if (unlikely(order > MAX_CONTIG_ORDER))
2331 return -ENOMEM;
2332
2333 memset((void *) vstart, 0, PAGE_SIZE << order);
2334
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002335 spin_lock_irqsave(&xen_reservation_lock, flags);
2336
2337 /* 1. Zap current PTEs, remembering MFNs. */
2338 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2339
2340 /* 2. Get a new contiguous memory extent. */
2341 out_frame = virt_to_pfn(vstart);
2342 success = xen_exchange_memory(1UL << order, 0, in_frames,
2343 1, order, &out_frame,
2344 address_bits);
2345
2346 /* 3. Map the new extent in place of old pages. */
2347 if (success)
2348 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2349 else
2350 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2351
2352 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2353
Stefano Stabellini69908902013-10-09 16:56:32 +00002354 *dma_handle = virt_to_machine(vstart).maddr;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002355 return success ? 0 : -ENOMEM;
2356}
2357EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2358
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002359void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002360{
2361 unsigned long *out_frames = discontig_frames, in_frame;
2362 unsigned long flags;
2363 int success;
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002364 unsigned long vstart;
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002365
2366 if (xen_feature(XENFEAT_auto_translated_physmap))
2367 return;
2368
2369 if (unlikely(order > MAX_CONTIG_ORDER))
2370 return;
2371
Stefano Stabellini1b65c4e2013-10-10 13:41:10 +00002372 vstart = (unsigned long)phys_to_virt(pstart);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002373 memset((void *) vstart, 0, PAGE_SIZE << order);
2374
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002375 spin_lock_irqsave(&xen_reservation_lock, flags);
2376
2377 /* 1. Find start MFN of contiguous extent. */
2378 in_frame = virt_to_mfn(vstart);
2379
2380 /* 2. Zap current PTEs. */
2381 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2382
2383 /* 3. Do the exchange for non-contiguous MFNs. */
2384 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2385 0, out_frames, 0);
2386
2387 /* 4. Map new pages in place of old pages. */
2388 if (success)
2389 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2390 else
2391 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2392
2393 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2394}
2395EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2396
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002397#ifdef CONFIG_XEN_PVHVM
Olaf Hering34b6f012012-10-01 21:18:01 +02002398#ifdef CONFIG_PROC_VMCORE
2399/*
2400 * This function is used in two contexts:
2401 * - the kdump kernel has to check whether a pfn of the crashed kernel
2402 * was a ballooned page. vmcore is using this function to decide
2403 * whether to access a pfn of the crashed kernel.
2404 * - the kexec kernel has to check whether a pfn was ballooned by the
2405 * previous kernel. If the pfn is ballooned, handle it properly.
2406 * Returns 0 if the pfn is not backed by a RAM page, the caller may
2407 * handle the pfn special in this case.
2408 * handle the pfn specially in this case.
2409static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2410{
2411 struct xen_hvm_get_mem_type a = {
2412 .domid = DOMID_SELF,
2413 .pfn = pfn,
2414 };
2415 int ram;
2416
2417 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2418 return -ENXIO;
2419
2420 switch (a.mem_type) {
2421 case HVMMEM_mmio_dm:
2422 ram = 0;
2423 break;
2424 case HVMMEM_ram_rw:
2425 case HVMMEM_ram_ro:
2426 default:
2427 ram = 1;
2428 break;
2429 }
2430
2431 return ram;
2432}
2433#endif
2434
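/*
 * Tell the hypervisor (HVMOP_pagetable_dying) that this pagetable is about
 * to be torn down, so it can drop any shadow of it eagerly rather than
 * page by page.
 */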
Stefano Stabellini59151002010-06-17 14:22:52 +01002435static void xen_hvm_exit_mmap(struct mm_struct *mm)
2436{
2437 struct xen_hvm_pagetable_dying a;
2438 int rc;
2439
2440 a.domid = DOMID_SELF;
2441 a.gpa = __pa(mm->pgd);
2442 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2443 WARN_ON_ONCE(rc < 0);
2444}
2445
2446static int is_pagetable_dying_supported(void)
2447{
2448 struct xen_hvm_pagetable_dying a;
2449 int rc = 0;
2450
2451 a.domid = DOMID_SELF;
2452 a.gpa = 0x00;
2453 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2454 if (rc < 0) {
2455 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2456 return 0;
2457 }
2458 return 1;
2459}
2460
2461void __init xen_hvm_init_mmu_ops(void)
2462{
2463 if (is_pagetable_dying_supported())
2464 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
Olaf Hering34b6f012012-10-01 21:18:01 +02002465#ifdef CONFIG_PROC_VMCORE
2466 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2467#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002468}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002469#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002470
Mukesh Rathor77945ca2014-05-23 19:33:44 -07002471#ifdef CONFIG_XEN_PVH
2472/*
2473 * Map a foreign gfn (fgfn) to a local pfn (lpfn). This is for user
2474 * space creating a new guest on pvh dom0 and needing to map domU pages.
2475 */
2476static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
2477 unsigned int domid)
2478{
2479 int rc, err = 0;
2480 xen_pfn_t gpfn = lpfn;
2481 xen_ulong_t idx = fgfn;
2482
2483 struct xen_add_to_physmap_range xatp = {
2484 .domid = DOMID_SELF,
2485 .foreign_domid = domid,
2486 .size = 1,
2487 .space = XENMAPSPACE_gmfn_foreign,
2488 };
2489 set_xen_guest_handle(xatp.idxs, &idx);
2490 set_xen_guest_handle(xatp.gpfns, &gpfn);
2491 set_xen_guest_handle(xatp.errs, &err);
2492
2493 rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
2494 if (rc < 0)
2495 return rc;
2496 return err;
2497}
2498
2499static int xlate_remove_from_p2m(unsigned long spfn, int count)
2500{
2501 struct xen_remove_from_physmap xrp;
2502 int i, rc;
2503
2504 for (i = 0; i < count; i++) {
2505 xrp.domid = DOMID_SELF;
2506 xrp.gpfn = spfn+i;
2507 rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
2508 if (rc)
2509 break;
2510 }
2511 return rc;
2512}
2513
struct xlate_remap_data {
	unsigned long fgfn; /* foreign domain's gfn */
	pgprot_t prot;
	domid_t domid;
	int index;
	struct page **pages;
};

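/*
 * Per-pte callback: take the next local page from remap->pages, map the
 * foreign gfn at that page's pfn in the p2m, then point the pte at the
 * local pfn so the VMA sees the foreign page.
 */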
static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			    void *data)
{
	int rc;
	struct xlate_remap_data *remap = data;
	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));

	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
	if (rc)
		return rc;
	native_set_pte(ptep, pteval);

	return 0;
}

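/*
 * PVH (auto-translated) remap path: walk the virtual range with
 * apply_to_page_range(), installing one pte per page via
 * xlate_map_pte_fn(), and flush the TLB once at the end.
 */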
static int xlate_remap_gfn_range(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long mfn,
				 int nr, pgprot_t prot, unsigned domid,
				 struct page **pages)
{
	int err;
	struct xlate_remap_data pvhdata;

	BUG_ON(!pages);

	pvhdata.fgfn = mfn;
	pvhdata.prot = prot;
	pvhdata.domid = domid;
	pvhdata.index = 0;
	pvhdata.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  xlate_map_pte_fn, &pvhdata);
	flush_tlb_all();
	return err;
}
#endif

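/*
 * PV remap path: pte updates are accumulated in an on-stack array of
 * struct mmu_update entries and handed to the hypervisor in batches of
 * up to REMAP_BATCH_SIZE pages per mmu_update hypercall.
 */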
#define REMAP_BATCH_SIZE 16

struct remap_data {
	unsigned long mfn;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

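/*
 * Per-pte callback for the PV case: record the machine address of the
 * pte slot and the new pte value for the next mfn in the current
 * mmu_update entry; the caller submits the batch via the hypercall.
 */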
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

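/*
 * Map foreign frames of domain 'domid', starting at frame 'mfn', into
 * the VMA at 'addr' for 'nr' pages. Auto-translated (PVH) guests go
 * through xlate_remap_gfn_range(); PV guests batch direct pte updates
 * through the mmu_update hypercall.
 */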
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	int batch;
	unsigned long range;
	int err = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
		/* We need to update the local page tables and the xen HAP */
		return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
					     domid, pages);
#else
		return -EINVAL;
#endif
	}

	rmd.mfn = mfn;
	rmd.prot = prot;

	while (nr) {
		batch = min(REMAP_BATCH_SIZE, nr);
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
		if (err < 0)
			goto out;

		nr -= batch;
		addr += range;
	}

	err = 0;
out:
	xen_flush_tlb_all();

	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

/* Returns: 0 on success */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

#ifdef CONFIG_XEN_PVH
	while (numpgs--) {
		/*
		 * The mmu has already cleaned up the process mmu
		 * resources at this point (lookup_address() will return
		 * NULL).
		 */
		unsigned long pfn = page_to_pfn(pages[numpgs]);

		xlate_remove_from_p2m(pfn, 1);
	}
	/*
	 * We don't need to flush TLBs because, as part of
	 * xlate_remove_from_p2m(), the hypervisor does the TLB flushes
	 * after removing the p2m entries from the EPT/NPT.
	 */
	return 0;
#else
	return -EINVAL;
#endif
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);