/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
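
/*
 * Illustrative sketch of the conversion described above (documentation
 * only; the names below are the real helpers used later in this file):
 *
 *	write path:  pte = mfn_pte(pfn_to_mfn(pfn), flags);
 *	read path:   pfn = mfn_to_pfn(pte_mfn(pte));
 *
 * The bulk conversions are done by pte_pfn_to_mfn()/pte_mfn_to_pfn()
 * below.
 */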
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

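/*
 * Typical (illustrative) calling pattern for the extend helpers here,
 * as used by xen_set_pmd_hyper() and friends below:
 *
 *	xen_mc_batch();
 *	u.ptr = ...; u.val = ...;
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * The update is either appended to an in-flight mmu_update multicall
 * or a new one is started.
 */
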
static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

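/*
 * Worked example for the decoding above (illustrative): a pfn in an
 * identity (1:1) region comes back from get_phys_to_machine() as
 * pfn | IDENTITY_FRAME_BIT, so the bit is stripped and the pte is
 * tagged with _PAGE_IOMAP; a pfn with no backing frame comes back as
 * INVALID_P2M_ENTRY (all ones, which is why it must be tested before
 * IDENTITY_FRAME_BIT) and yields an empty non-present pte.
 */
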
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is an MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags         Linux  Xen  Default
 * 0                      WB     WB   WB
 * 1            PWT       WC     WT   WT
 * 2        PCD           UC-    UC-  UC-
 * 3        PCD PWT       UC     UC   UC
 * 4    PAT               WB     WC   WB
 * 5    PAT     PWT       WC     WP   WT
 * 6    PAT PCD           UC-    UC   UC-
 * 7    PAT PCD PWT       UC     UC   UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

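/*
 * For reference, decoding the constant checked above (one byte per PAT
 * entry, entry 0 in the low byte; standard x86 encodings 0=UC 1=WC
 * 4=WT 5=WP 6=WB 7=UC-):
 *
 *	0x0007010600070106 -> entries 0..7 = WB WC UC- UC WB WC UC- UC
 *
 * which is the "UC UC- WC WB" layout (entries 3..0) noted above.
 */
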
static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

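/*
 * Descriptive note: on 64-bit, a guest keeps a separate user-mode
 * pagetable; its pgd is stashed in page->private of the kernel pgd's
 * page, which is what the lookup below retrieves (see also the
 * user_pgd handling in __xen_pgd_pin()/__xen_pgd_unpin()).
 */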
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif /* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit this
	 * ends up making a zero-sized hole, so it is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}

#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode, and it hasn't yet flushed
	   its set of pending hypercalls.  In this case, we can look
	   at its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

Attilio Raoc7112882012-08-21 21:22:40 +01001177static void xen_post_allocator_init(void);
1178
Attilio Rao7737b212012-08-21 21:22:38 +01001179static void __init xen_pagetable_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001180{
Attilio Rao843b8ed2012-08-21 21:22:39 +01001181 paging_init();
Attilio Raoc7112882012-08-21 21:22:40 +01001182 xen_setup_shared_info();
1183 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001184}
1185
Stefano Stabellini279b7062011-04-14 15:49:41 +01001186static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1187{
1188 /* reserve the range used */
1189 native_pagetable_reserve(start, end);
1190
1191 /* set as RW the rest */
1192 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1193 PFN_PHYS(pgt_buf_top));
1194 while (end < PFN_PHYS(pgt_buf_top)) {
1195 make_lowmem_page_readwrite(__va(end));
1196 end += PAGE_SIZE;
1197 }
1198}
1199
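/*
 * cr2 is virtualized: on a page fault Xen stashes the faulting address
 * in the per-vcpu vcpu_info structure, so reads and writes go through
 * that shared copy rather than the hardware register.
 */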
static void xen_write_cr2(unsigned long cr2)
{
	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return this_cpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return this_cpu_read(xen_vcpu_info.arch.cr2);
}

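/*
 * TLB flushes are issued as mmuext ops through the multicall machinery;
 * xen_mc_issue() submits them immediately unless we are in lazy MMU
 * mode, in which case they are batched with other pending updates.
 */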
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_single(addr);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

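/*
 * Remote TLB flushes are done entirely inside the hypervisor via
 * MMUEXT_TLB_FLUSH_MULTI / MMUEXT_INVLPG_MULTI over a vcpu mask, so the
 * guest never has to send IPIs itself.
 */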
static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct {
		struct mmuext_op op;
#ifdef CONFIG_SMP
		DECLARE_BITMAP(mask, num_processors);
#else
		DECLARE_BITMAP(mask, NR_CPUS);
#endif
	} *args;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUs. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = start;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	this_cpu_write(xen_current_cr3, (unsigned long)v);
}

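/*
 * Load a new pagetable base.  Xen distinguishes the kernel baseptr
 * (MMUEXT_NEW_BASEPTR) from the 64-bit user baseptr
 * (MMUEXT_NEW_USER_BASEPTR); both are queued on the current multicall
 * batch rather than issued immediately.
 */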
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op op;
	unsigned long mfn;

	trace_xen_mmu_write_cr3(kernel, cr3);

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = mfn;

	xen_extend_mmuext_op(&op);

	if (kernel) {
		this_cpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to IPIs */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

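/*
 * On 64-bit, each mm carries a second, user-mode pgd (stashed in
 * page->private of the kernel pgd's page), since Xen 64-bit guests use
 * separate kernel and user pagetables; xen_get_user_pgd() retrieves it.
 */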
static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
#else /* CONFIG_X86_64 */
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/*
	 * If the new pfn is within the range of the newly allocated
	 * kernel pagetable, and it isn't being mapped into an
	 * early_ioremap fixmap slot as a freshly allocated page, make sure
	 * it is RO.
	 */
	if (((!is_early_ioremap_ptep(ptep) &&
			pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
		pte = pte_wrprotect(pte);

	return pte;
}
#endif /* CONFIG_X86_64 */

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive.  At
 * this point it is not possible to tell if a page is pinned or not,
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
		pte = mask_rw_pte(ptep, pte);
	else
		pte = __pte_ma(0);

	native_set_pte(ptep, pte);
}

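/* Issue a single, synchronous mmuext op; BUG if the hypervisor refuses. */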
static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	op->arg1.mfn = pfn_to_mfn(pfn);

	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	struct multicall_space mcs;
	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

	mcs = __xen_mc_entry(0);
	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				    unsigned level)
{
	bool pinned = PagePinned(virt_to_page(mm->pgd));

	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

	if (pinned) {
		struct page *page = pfn_to_page(pfn);

		SetPagePinned(page);

		if (!PageHighMem(page)) {
			xen_mc_batch();

			__set_pfn_prot(pfn, PAGE_KERNEL_RO);

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		if (!PageHighMem(page)) {
			xen_mc_batch();

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

			__set_pfn_prot(pfn, PAGE_KERNEL);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

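/*
 * On 32-bit, the hypervisor occupies the top of the virtual address
 * space; reserve that region (as reported by XENVER_platform_parameters,
 * falling back to HYPERVISOR_VIRT_START) so the kernel stays out of it.
 */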
void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

/* Set the page permissions on identity-mapped pages */
static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

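/*
 * Populate the given pmd with a linear pfn mapping for the first
 * max_pfn pages, allocating any needed pte pages from the brk area and
 * marking them read-only, as Xen requires for pagetable pages.
 */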
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
				      PAGE_SIZE);

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == LEVEL1_IDENT_ENTRIES)
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

#ifdef CONFIG_X86_32
			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;
#endif

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

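/*
 * The machine-to-physical table is provided ready-mapped by the
 * hypervisor; just record where it lives and how many entries it has.
 */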
void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	}
#ifdef CONFIG_X86_32
	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
		< machine_to_phys_mapping);
#endif
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
					  unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* max_pfn_mapped is the last pfn mapped in the initial memory
	 * mappings. Considering that on Xen after the kernel mappings we
	 * have the mappings of some pages that don't exist in pfn space, we
	 * set max_pfn_mapped to the last real pfn mapped. */
	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	memblock_reserve(__pa(xen_start_info->pt_base),
			 xen_start_info->nr_pt_frames * PAGE_SIZE);

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static void __init xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests. Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
	 */
	swapper_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
	memcpy(swapper_kernel_pmd, initial_kernel_pmd,
	       sizeof(pmd_t) * PTRS_PER_PMD);
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	xen_write_cr3(cr3);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

	pv_mmu_ops.write_cr3 = &xen_write_cr3;
}

pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
					  unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(initial_kernel_pmd, max_pfn);

	memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	initial_page_table[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	xen_write_cr3(__pa(initial_page_table));

	memblock_reserve(__pa(xen_start_info->pt_base),
			 xen_start_info->nr_pt_frames * PAGE_SIZE);

	return initial_page_table;
}
#endif	/* CONFIG_X86_64 */

static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

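/*
 * Fixmap entries need different handling under Xen: ordinary kernel
 * pages get pfn-based ptes, the local/IO APIC slots are pointed at a
 * dummy page (all real access goes via hypercalls), and everything
 * else is treated as a machine frame, i.e. a hardware mapping.
 */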
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
	case VVAR_PAGE:
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
	    idx == VVAR_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

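/*
 * Once the kernel's normal page allocator is up, switch from the
 * init-time pvop variants to the final ones, which may use struct page
 * and the multicall batching paths.
 */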
static void __init xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initconst = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
	.write_cr3 = xen_write_cr3_init,
#else
	.write_cr3 = xen_write_cr3,
#endif

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.release_pmd = xen_release_pmd_init,

	.set_pte = xen_set_pte_init,
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
	x86_init.paging.pagetable_init = xen_pagetable_init;
	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER	9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

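/*
 * Unmap a 2^order range of pages at vaddr and drop their p2m entries,
 * recording the old MFNs (in_frames) and/or the PFNs (out_frames) for
 * use in a subsequent XENMEM_exchange.
 */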
#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

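/*
 * Exchange the pages backing [vstart, vstart + 2^order) for a single
 * machine-contiguous extent below 1<<address_bits.  A typical caller
 * is a DMA path that needs machine-contiguous memory; a rough sketch
 * (illustrative only, not taken from any particular caller):
 *
 *	buf = (unsigned long)__get_free_pages(GFP_KERNEL, order);
 *	rc  = xen_create_contiguous_region(buf, order, 32);
 *
 * much as the Xen swiotlb code does when setting up bounce buffers.
 */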
int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long  flags;
	int            success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long  flags;
	int success;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

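/*
 * xen_remap_domain_mfn_range() maps foreign (or device) machine frames
 * into a VMA: apply_to_page_range() walks the ptes, and
 * remap_area_mfn_pte_fn() records one mmu_update per pte; the updates
 * are then pushed to the hypervisor REMAP_BATCH_SIZE entries at a time.
 */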
struct remap_data {
	unsigned long mfn;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       unsigned long mfn, int nr,
			       pgprot_t prot, unsigned domid)
{
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	int batch;
	unsigned long range;
	int err = 0;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EINVAL;

	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
				(VM_PFNMAP | VM_RESERVED | VM_IO)));

	rmd.mfn = mfn;
	rmd.prot = prot;

	while (nr) {
		batch = min(REMAP_BATCH_SIZE, nr);
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
		if (err < 0)
			goto out;

		nr -= batch;
		addr += range;
	}

	err = 0;
out:

	flush_tlb_all();

	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);