/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
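/*
 * A rough sketch of the conversion in practice: a pte for machine
 * frame @mfn is built with mfn_pte(mfn, flags) and the frame number
 * is read back with pte_mfn(pte); both helpers are used below.  The
 * pfn<->mfn leg of the translation is performed by pte_pfn_to_mfn()
 * and pte_mfn_to_pfn() further down in this file.
 */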
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to hold the page table pages needed to map the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

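/*
 * Translate a kernel virtual address to the machine frame number that
 * backs it: a quick p2m lookup for directly-mapped addresses, or a
 * full pagetable walk otherwise (see arbitrary_virt_to_machine()).
 */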
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

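/*
 * Report whether a pagetable page has been pinned, i.e. made read-only
 * and registered with Xen.  Pinned pages must be updated via hypercall
 * rather than written directly.
 */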
static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

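/*
 * Append an mmu_update to the current multicall batch: if the batch
 * already ends in an mmu_update hypercall, extend its argument list;
 * otherwise start a new hypercall entry.  This coalesces runs of pte
 * updates into a single trip into the hypervisor.
 */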
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

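/*
 * Try to queue a pte write into the current lazy-MMU multicall batch.
 * Returns false when we are not in lazy MMU mode, in which case the
 * caller falls back to a direct native write.
 */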
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval))
		native_set_pte(ptep, pteval);
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */

static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * Paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

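/*
 * For _PAGE_IOMAP ptes the frame number is already a machine frame
 * (e.g. device MMIO), so no p2m translation is applied.
 */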
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is an MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

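/*
 * A quick decode of the value checked above, assuming the usual x86
 * PAT encoding (0x00 = UC, 0x01 = WC, 0x06 = WB, 0x07 = UC-): reading
 * 0x0007010600070106 a byte at a time from the low end gives entries
 * 0..7 as WB, WC, UC-, UC, WB, WC, UC-, UC, i.e. the "UC UC- WC WB"
 * layout described above, repeated with and without _PAGE_PAT.
 */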
static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

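/*
 * On 64-bit, a kernel pgd may have a companion user pgd for the
 * usermode address range, stored in page->private of the pgd page.
 * Return the matching slot in that user pgd, or NULL if there is none.
 */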
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

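/*
 * Queue a pin or unpin mmuext op (MMUEXT_[UN]PIN_L*_TABLE) for the
 * given frame as part of the current multicall batch.
 */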
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

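/*
 * Pin a single pagetable page: remap it read-only and, for pte pages
 * under split pte locks, pin it with Xen while holding its lock (see
 * the comment in the body).  Returns nonzero if kmaps need flushing.
 */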
static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

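/*
 * A pagetable must be pinned before the mm owning it can run: both
 * activate_mm (switching to a new mm) and dup_mmap (fork) therefore
 * pin under the mm's page_table_lock.
 */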
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001051static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001052{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001053 spin_lock(&next->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001054 xen_pgd_pin(next);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001055 spin_unlock(&next->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001056}
1057
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001058static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001059{
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001060 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001061 xen_pgd_pin(mm);
Jeremy Fitzhardingef4f97b32007-07-17 18:37:05 -07001062 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001063}
1064
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001065
1066#ifdef CONFIG_SMP
1067/* Another cpu may still have their %cr3 pointing at the pagetable, so
1068 we need to repoint it somewhere else before we can unpin it. */
1069static void drop_other_mm_ref(void *info)
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001070{
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001071 struct mm_struct *mm = info;
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001072 struct mm_struct *active_mm;
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001073
Brian Gerst9eb912d2009-01-19 00:38:57 +09001074 active_mm = percpu_read(cpu_tlbstate.active_mm);
Jeremy Fitzhardingece87b3d2008-07-08 15:06:40 -07001075
Tian, Kevin78998912011-05-12 10:56:08 +08001076 if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001077 leave_mm(smp_processor_id());
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001078
1079 /* If this cpu still has a stale cr3 reference, then make sure
1080 it has been flushed. */
Jeremy Fitzhardinge7fd7d832009-02-17 23:24:03 -08001081 if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001082 load_cr3(swapper_pg_dir);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001083}
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001084
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001085static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001086{
Mike Travise4d98202008-12-16 17:34:05 -08001087 cpumask_var_t mask;
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001088 unsigned cpu;
1089
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001090 if (current->active_mm == mm) {
1091 if (current->mm == mm)
1092 load_cr3(swapper_pg_dir);
1093 else
1094 leave_mm(smp_processor_id());
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001095 }
1096
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001097 /* Get the "official" set of cpus referring to our pagetable. */
Mike Travise4d98202008-12-16 17:34:05 -08001098 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1099 for_each_online_cpu(cpu) {
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001100 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
Mike Travise4d98202008-12-16 17:34:05 -08001101 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1102 continue;
1103 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1104 }
1105 return;
1106 }
Rusty Russell78f1c4d2009-09-24 09:34:51 -06001107 cpumask_copy(mask, mm_cpumask(mm));
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001108
1109 /* It's possible that a vcpu may have a stale reference to our
1110 cr3, because its in lazy mode, and it hasn't yet flushed
1111 its set of pending hypercalls yet. In this case, we can
1112 look at its actual current cr3 value, and force it to flush
1113 if needed. */
1114 for_each_online_cpu(cpu) {
1115 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
Mike Travise4d98202008-12-16 17:34:05 -08001116 cpumask_set_cpu(cpu, mask);
Jeremy Fitzhardinge9f799912007-10-16 11:51:30 -07001117 }
1118
Mike Travise4d98202008-12-16 17:34:05 -08001119 if (!cpumask_empty(mask))
1120 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1121 free_cpumask_var(mask);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001122}
1123#else
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001124static void xen_drop_mm_ref(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001125{
1126 if (current->active_mm == mm)
1127 load_cr3(swapper_pg_dir);
1128}
1129#endif
1130
1131/*
1132 * While a process runs, Xen pins its pagetables, which means that the
1133 * hypervisor forces it to be read-only, and it controls all updates
1134 * to it. This means that all pagetable updates have to go via the
1135 * hypervisor, which is moderately expensive.
1136 *
1137 * Since we're pulling the pagetable down, we switch to use init_mm,
1138 * unpin old process pagetable and mark it all read-write, which
1139 * allows further operations on it to be simple memory accesses.
1140 *
1141 * The only subtle point is that another CPU may be still using the
1142 * pagetable because of lazy tlb flushing. This means we need need to
1143 * switch all CPUs off this pagetable before we can unpin it.
1144 */
Jeremy Fitzhardinge4c136292010-12-01 22:57:39 -08001145static void xen_exit_mmap(struct mm_struct *mm)
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001146{
1147 get_cpu(); /* make sure we don't move around */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001148 xen_drop_mm_ref(mm);
Jeremy Fitzhardingef87e4ca2007-07-17 18:37:06 -07001149 put_cpu();
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001150
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001151 spin_lock(&mm->page_table_lock);
Jeremy Fitzhardingedf912ea2007-09-25 11:50:00 -07001152
1153 /* pgd may not be pinned in the error exit path of execve */
Jeremy Fitzhardinge7708ad62008-08-19 13:34:22 -07001154 if (xen_page_pinned(mm->pgd))
Jeremy Fitzhardingeeefb47f2008-10-08 13:01:39 -07001155 xen_pgd_unpin(mm);
Jeremy Fitzhardinge74260712007-10-16 11:51:30 -07001156
Jeremy Fitzhardingef120f132007-07-17 18:37:06 -07001157 spin_unlock(&mm->page_table_lock);
Jeremy Fitzhardinge3b827c12007-07-17 18:37:04 -07001158}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001159
Daniel Kiper3f5089532011-05-12 17:19:53 -04001160static void __init xen_pagetable_setup_start(pgd_t *base)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001161{
1162}
1163
Stefano Stabellini279b7062011-04-14 15:49:41 +01001164static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1165{
1166 /* reserve the range used */
1167 native_pagetable_reserve(start, end);
1168
1169	/* set the rest of the pagetable buffer read-write */
1170 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1171 PFN_PHYS(pgt_buf_top));
1172 while (end < PFN_PHYS(pgt_buf_top)) {
1173 make_lowmem_page_readwrite(__va(end));
1174 end += PAGE_SIZE;
1175 }
1176}
1177
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001178static void xen_post_allocator_init(void);
1179
Daniel Kiper3f5089532011-05-12 17:19:53 -04001180static void __init xen_pagetable_setup_done(pgd_t *base)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001181{
1182 xen_setup_shared_info();
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001183 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001184}
1185
1186static void xen_write_cr2(unsigned long cr2)
1187{
1188 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1189}
1190
1191static unsigned long xen_read_cr2(void)
1192{
1193 return percpu_read(xen_vcpu)->arch.cr2;
1194}
1195
1196unsigned long xen_read_cr2_direct(void)
1197{
1198 return percpu_read(xen_vcpu_info.arch.cr2);
1199}
1200
1201static void xen_flush_tlb(void)
1202{
1203 struct mmuext_op *op;
1204 struct multicall_space mcs;
1205
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001206 trace_xen_mmu_flush_tlb(0);
1207
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001208 preempt_disable();
1209
1210 mcs = xen_mc_entry(sizeof(*op));
1211
1212 op = mcs.args;
1213 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1214 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1215
1216 xen_mc_issue(PARAVIRT_LAZY_MMU);
1217
1218 preempt_enable();
1219}
1220
1221static void xen_flush_tlb_single(unsigned long addr)
1222{
1223 struct mmuext_op *op;
1224 struct multicall_space mcs;
1225
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001226 trace_xen_mmu_flush_tlb_single(addr);
1227
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001228 preempt_disable();
1229
1230 mcs = xen_mc_entry(sizeof(*op));
1231 op = mcs.args;
1232 op->cmd = MMUEXT_INVLPG_LOCAL;
1233 op->arg1.linear_addr = addr & PAGE_MASK;
1234 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1235
1236 xen_mc_issue(PARAVIRT_LAZY_MMU);
1237
1238 preempt_enable();
1239}
1240
1241static void xen_flush_tlb_others(const struct cpumask *cpus,
1242 struct mm_struct *mm, unsigned long va)
1243{
1244 struct {
1245 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001246#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001247 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001248#else
1249 DECLARE_BITMAP(mask, NR_CPUS);
1250#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001251 } *args;
1252 struct multicall_space mcs;
1253
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001254 trace_xen_mmu_flush_tlb_others(cpus, mm, va);
1255
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001256 if (cpumask_empty(cpus))
1257 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001258
1259 mcs = xen_mc_entry(sizeof(*args));
1260 args = mcs.args;
1261 args->op.arg2.vcpumask = to_cpumask(args->mask);
1262
1263	/* Remove us, and any offline CPUs. */
1264 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1265 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001266
1267 if (va == TLB_FLUSH_ALL) {
1268 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1269 } else {
1270 args->op.cmd = MMUEXT_INVLPG_MULTI;
1271 args->op.arg1.linear_addr = va;
1272 }
1273
1274 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1275
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001276 xen_mc_issue(PARAVIRT_LAZY_MMU);
1277}
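
/*
 * The three flush helpers above all follow the same multicall pattern:
 * reserve argument space in the per-cpu batch, fill in an mmuext op,
 * and let xen_mc_issue() either append to a pending lazy-MMU batch or
 * flush immediately.  A minimal sketch of that pattern, assuming the
 * multicall helpers declared in multicalls.h; example_mmuext() itself
 * is a hypothetical helper, not part of this file:
 */
#if 0
static void example_mmuext(unsigned int cmd)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));	/* room for one op's args */
	op = mcs.args;
	op->cmd = cmd;			/* e.g. MMUEXT_TLB_FLUSH_LOCAL */
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* batched if in lazy MMU mode */

	preempt_enable();
}
#endif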
1278
1279static unsigned long xen_read_cr3(void)
1280{
1281 return percpu_read(xen_cr3);
1282}
1283
1284static void set_current_cr3(void *v)
1285{
1286 percpu_write(xen_current_cr3, (unsigned long)v);
1287}
1288
1289static void __xen_write_cr3(bool kernel, unsigned long cr3)
1290{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001291 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001292 unsigned long mfn;
1293
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001294 trace_xen_mmu_write_cr3(kernel, cr3);
1295
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001296 if (cr3)
1297 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1298 else
1299 mfn = 0;
1300
1301 WARN_ON(mfn == 0 && kernel);
1302
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001303 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1304 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001305
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001306 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001307
1308 if (kernel) {
1309 percpu_write(xen_cr3, cr3);
1310
1311 /* Update xen_current_cr3 once the batch has actually
1312 been submitted. */
1313 xen_mc_callback(set_current_cr3, (void *)cr3);
1314 }
1315}
1316
1317static void xen_write_cr3(unsigned long cr3)
1318{
1319 BUG_ON(preemptible());
1320
1321 xen_mc_batch(); /* disables interrupts */
1322
1323	/* Update while interrupts are disabled, so it's atomic with
1324	   respect to IPIs */
1325 percpu_write(xen_cr3, cr3);
1326
1327 __xen_write_cr3(true, cr3);
1328
1329#ifdef CONFIG_X86_64
1330 {
1331 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1332 if (user_pgd)
1333 __xen_write_cr3(false, __pa(user_pgd));
1334 else
1335 __xen_write_cr3(false, 0);
1336 }
1337#endif
1338
1339 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1340}
1341
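/* On x86-64, Xen maintains separate kernel and user pagetables, so a
   pgd may have a companion user pgd hanging off page->private of its
   struct page; xen_get_user_pgd() retrieves it. */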
1342static int xen_pgd_alloc(struct mm_struct *mm)
1343{
1344 pgd_t *pgd = mm->pgd;
1345 int ret = 0;
1346
1347 BUG_ON(PagePinned(virt_to_page(pgd)));
1348
1349#ifdef CONFIG_X86_64
1350 {
1351 struct page *page = virt_to_page(pgd);
1352 pgd_t *user_pgd;
1353
1354 BUG_ON(page->private != 0);
1355
1356 ret = -ENOMEM;
1357
1358 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1359 page->private = (unsigned long)user_pgd;
1360
1361 if (user_pgd != NULL) {
1362 user_pgd[pgd_index(VSYSCALL_START)] =
1363 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1364 ret = 0;
1365 }
1366
1367 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1368 }
1369#endif
1370
1371 return ret;
1372}
1373
1374static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1375{
1376#ifdef CONFIG_X86_64
1377 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1378
1379 if (user_pgd)
1380 free_page((unsigned long)user_pgd);
1381#endif
1382}
1383
Stefano Stabelliniee176452011-04-19 14:47:31 +01001384#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001385static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001386{
1387	/* If there's an existing pte, then don't allow _PAGE_RW to be set:
	   (old & _PAGE_RW) | ~_PAGE_RW is all-ones when the old entry is
	   writable and ~_PAGE_RW otherwise, so ANDing it in clears RW from
	   the new pte unless the old entry already had it. */
1388	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1389		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1390			       pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001391
1392 return pte;
1393}
1394#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001395static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001396{
1397 unsigned long pfn = pte_pfn(pte);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001398
1399 /*
1400 * If the new pfn is within the range of the newly allocated
1401 * kernel pagetable, and it isn't being mapped into an
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001402 * early_ioremap fixmap slot as a freshly allocated page, make sure
1403 * it is RO.
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001404 */
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001405 if (((!is_early_ioremap_ptep(ptep) &&
Stefano Stabellinib9269dc2011-04-12 12:19:49 +01001406 pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001407 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001408 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001409
1410 return pte;
1411}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001412#endif /* CONFIG_X86_64 */
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001413
1414/* Init-time set_pte, used while constructing the initial pagetables;
1415   it doesn't allow RO pagetable pages to be remapped RW */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001416static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001417{
1418 pte = mask_rw_pte(ptep, pte);
1419
1420 xen_set_pte(ptep, pte);
1421}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001422
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001423static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1424{
1425 struct mmuext_op op;
1426 op.cmd = cmd;
1427 op.arg1.mfn = pfn_to_mfn(pfn);
1428 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1429 BUG();
1430}
1431
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001432/* Early in boot, while setting up the initial pagetable, assume
1433 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001434static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001435{
1436#ifdef CONFIG_FLATMEM
1437 BUG_ON(mem_map); /* should only be used early */
1438#endif
1439 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001440 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1441}
1442
1443/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001444static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001445{
1446#ifdef CONFIG_FLATMEM
1447 BUG_ON(mem_map); /* should only be used early */
1448#endif
1449 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001450}
1451
1452/* Early release_pte assumes that all pts are pinned, since there's
1453 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001454static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001455{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001456 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001457 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1458}
1459
Daniel Kiper3f5089532011-05-12 17:19:53 -04001460static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001461{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001462 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001463}
1464
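/* The helpers below use __xen_mc_entry(), which extends a multicall
   batch the caller has already opened with xen_mc_batch(). */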
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001465static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1466{
1467 struct multicall_space mcs;
1468 struct mmuext_op *op;
1469
1470 mcs = __xen_mc_entry(sizeof(*op));
1471 op = mcs.args;
1472 op->cmd = cmd;
1473 op->arg1.mfn = pfn_to_mfn(pfn);
1474
1475 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1476}
1477
1478static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1479{
1480 struct multicall_space mcs;
1481 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1482
1483 mcs = __xen_mc_entry(0);
1484 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1485 pfn_pte(pfn, prot), 0);
1486}
1487
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001488/* This needs to make sure the new pte page is pinned iff it's being
1489 attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001490static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1491 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001492{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001493 bool pinned = PagePinned(virt_to_page(mm->pgd));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001494
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001495 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001496
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001497 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001498 struct page *page = pfn_to_page(pfn);
1499
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001500 SetPagePinned(page);
1501
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001502 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001503 xen_mc_batch();
1504
1505 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1506
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001507 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001508 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1509
1510 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001511 } else {
1512 /* make sure there are no stray mappings of
1513 this page */
1514 kmap_flush_unused();
1515 }
1516 }
1517}
1518
1519static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1520{
1521 xen_alloc_ptpage(mm, pfn, PT_PTE);
1522}
1523
1524static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1525{
1526 xen_alloc_ptpage(mm, pfn, PT_PMD);
1527}
1528
1529/* This should never be called until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001530static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001531{
1532 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001533 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001534
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001535 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1536
1537 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001538 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001539 xen_mc_batch();
1540
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001541 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001542 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1543
1544 __set_pfn_prot(pfn, PAGE_KERNEL);
1545
1546 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001547 }
1548 ClearPagePinned(page);
1549 }
1550}
1551
1552static void xen_release_pte(unsigned long pfn)
1553{
1554 xen_release_ptpage(pfn, PT_PTE);
1555}
1556
1557static void xen_release_pmd(unsigned long pfn)
1558{
1559 xen_release_ptpage(pfn, PT_PMD);
1560}
1561
1562#if PAGETABLE_LEVELS == 4
1563static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1564{
1565 xen_alloc_ptpage(mm, pfn, PT_PUD);
1566}
1567
1568static void xen_release_pud(unsigned long pfn)
1569{
1570 xen_release_ptpage(pfn, PT_PUD);
1571}
1572#endif
1573
1574void __init xen_reserve_top(void)
1575{
1576#ifdef CONFIG_X86_32
1577 unsigned long top = HYPERVISOR_VIRT_START;
1578 struct xen_platform_parameters pp;
1579
1580 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1581 top = pp.virt_start;
1582
1583 reserve_top_address(-top);
1584#endif /* CONFIG_X86_32 */
1585}
1586
1587/*
1588 * Like __va(), but returns the address in the kernel mapping (which is
1589 * all we have until the physical memory mapping has been set up).
1590 */
1591static void *__ka(phys_addr_t paddr)
1592{
1593#ifdef CONFIG_X86_64
1594 return (void *)(paddr + __START_KERNEL_map);
1595#else
1596 return __va(paddr);
1597#endif
1598}
1599
1600/* Convert a machine address to physical address */
1601static unsigned long m2p(phys_addr_t maddr)
1602{
1603 phys_addr_t paddr;
1604
1605 maddr &= PTE_PFN_MASK;
1606 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1607
1608 return paddr;
1609}
1610
1611/* Convert a machine address to kernel virtual */
1612static void *m2v(phys_addr_t maddr)
1613{
1614 return __ka(m2p(maddr));
1615}
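
/*
 * A worked example of the conversions above; the pmd value here is
 * hypothetical, standing for any entry read from a live pagetable:
 */
#if 0
	pmd_t pmd;					/* entry == mfn | flag bits */
	phys_addr_t maddr = pmd.pmd & PTE_PFN_MASK;	/* strip the flags */
	unsigned long pfn = mfn_to_pfn(maddr >> PAGE_SHIFT);
	void *va = __ka((phys_addr_t)pfn << PAGE_SHIFT); /* same as m2v(pmd.pmd) */
#endif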
1616
Juan Quintela4ec53872010-09-02 15:45:43 +01001617/* Set the page permissions on identity-mapped pages */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001618static void set_page_prot(void *addr, pgprot_t prot)
1619{
1620 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1621 pte_t pte = pfn_pte(pfn, prot);
1622
1623 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1624 BUG();
1625}
1626
Daniel Kiper3f5089532011-05-12 17:19:53 -04001627static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001628{
1629 unsigned pmdidx, pteidx;
1630 unsigned ident_pte;
1631 unsigned long pfn;
1632
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001633 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1634 PAGE_SIZE);
1635
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001636 ident_pte = 0;
1637 pfn = 0;
1638 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1639 pte_t *pte_page;
1640
1641 /* Reuse or allocate a page of ptes */
1642 if (pmd_present(pmd[pmdidx]))
1643 pte_page = m2v(pmd[pmdidx].pmd);
1644 else {
1645 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001646 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001647 break;
1648
1649 pte_page = &level1_ident_pgt[ident_pte];
1650 ident_pte += PTRS_PER_PTE;
1651
1652 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1653 }
1654
1655 /* Install mappings */
1656 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1657 pte_t pte;
1658
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001659#ifdef CONFIG_X86_32
1660 if (pfn > max_pfn_mapped)
1661 max_pfn_mapped = pfn;
1662#endif
1663
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001664 if (!pte_none(pte_page[pteidx]))
1665 continue;
1666
1667 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1668 pte_page[pteidx] = pte;
1669 }
1670 }
1671
1672 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1673 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1674
1675 set_page_prot(pmd, PAGE_KERNEL_RO);
1676}
1677
Ian Campbell7e775062010-09-30 12:37:26 +01001678void __init xen_setup_machphys_mapping(void)
1679{
1680 struct xen_machphys_mapping mapping;
Ian Campbell7e775062010-09-30 12:37:26 +01001681
1682 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1683 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
Jan Beulichccbcdf72011-08-16 15:07:41 +01001684 machine_to_phys_nr = mapping.max_mfn + 1;
Ian Campbell7e775062010-09-30 12:37:26 +01001685 } else {
Jan Beulichccbcdf72011-08-16 15:07:41 +01001686 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
Ian Campbell7e775062010-09-30 12:37:26 +01001687 }
Jan Beulichccbcdf72011-08-16 15:07:41 +01001688#ifdef CONFIG_X86_32
Jan Beulich61cca2f2011-09-15 08:52:40 +01001689 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1690 < machine_to_phys_mapping);
Jan Beulichccbcdf72011-08-16 15:07:41 +01001691#endif
Ian Campbell7e775062010-09-30 12:37:26 +01001692}
1693
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001694#ifdef CONFIG_X86_64
1695static void convert_pfn_mfn(void *v)
1696{
1697 pte_t *pte = v;
1698 int i;
1699
1700 /* All levels are converted the same way, so just treat them
1701 as ptes. */
1702 for (i = 0; i < PTRS_PER_PTE; i++)
1703 pte[i] = xen_make_pte(pte[i].pte);
1704}
1705
1706/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001707 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001708 *
1709 * We can construct this by grafting the Xen provided pagetable into
1710 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1711 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1712 * means that only the kernel has a physical mapping to start with -
1713 * but that's enough to get __va working. We need to fill in the rest
1714 * of the physical mapping once some sort of allocator has been set
1715 * up.
1716 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001717pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001718 unsigned long max_pfn)
1719{
1720 pud_t *l3;
1721 pmd_t *l2;
1722
Stefano Stabellini14988a42011-02-18 11:32:40 +00001723	/* max_pfn_mapped is the last pfn mapped in the initial memory
1724	 * mappings.  Since on Xen the kernel mappings are followed by
1725	 * mappings of some pages that don't exist in pfn space, set
1726	 * max_pfn_mapped to the last real pfn mapped. */
1727 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1728
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001729 /* Zap identity mapping */
1730 init_level4_pgt[0] = __pgd(0);
1731
1732 /* Pre-constructed entries are in pfn, so convert to mfn */
1733 convert_pfn_mfn(init_level4_pgt);
1734 convert_pfn_mfn(level3_ident_pgt);
1735 convert_pfn_mfn(level3_kernel_pgt);
1736
1737 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1738 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1739
1740 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1741 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1742
1743 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1744 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1745 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1746
1747 /* Set up identity map */
1748 xen_map_identity_early(level2_ident_pgt, max_pfn);
1749
1750 /* Make pagetable pieces RO */
1751 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1752 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1753 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1754 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1755 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1756 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1757
1758 /* Pin down new L4 */
1759 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1760 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1761
1762 /* Unpin Xen-provided one */
1763 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1764
1765 /* Switch over */
1766 pgd = init_level4_pgt;
1767
1768 /*
1769 * At this stage there can be no user pgd, and no page
1770 * structure to attach it to, so make sure we just set kernel
1771 * pgd.
1772 */
1773 xen_mc_batch();
1774 __xen_write_cr3(true, __pa(pgd));
1775 xen_mc_issue(PARAVIRT_LAZY_CPU);
1776
Tejun Heo24aa0782011-07-12 11:16:06 +02001777 memblock_reserve(__pa(xen_start_info->pt_base),
1778 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001779
1780 return pgd;
1781}
1782#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001783static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1784static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1785
Daniel Kiper3f5089532011-05-12 17:19:53 -04001786static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001787{
1788 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1789
1790 BUG_ON(read_cr3() != __pa(initial_page_table));
1791 BUG_ON(cr3 != __pa(swapper_pg_dir));
1792
1793 /*
1794 * We are switching to swapper_pg_dir for the first time (from
1795 * initial_page_table) and therefore need to mark that page
1796 * read-only and then pin it.
1797 *
1798 * Xen disallows sharing of kernel PMDs for PAE
1799 * guests. Therefore we must copy the kernel PMD from
1800 * initial_page_table into a new kernel PMD to be used in
1801 * swapper_pg_dir.
1802 */
1803 swapper_kernel_pmd =
1804 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1805 memcpy(swapper_kernel_pmd, initial_kernel_pmd,
1806 sizeof(pmd_t) * PTRS_PER_PMD);
1807 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1808 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1809 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1810
1811 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1812 xen_write_cr3(cr3);
1813 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1814
1815 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1816 PFN_DOWN(__pa(initial_page_table)));
1817 set_page_prot(initial_page_table, PAGE_KERNEL);
1818 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1819
1820 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1821}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001822
Daniel Kiper3f5089532011-05-12 17:19:53 -04001823pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001824 unsigned long max_pfn)
1825{
1826 pmd_t *kernel_pmd;
1827
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001828 initial_kernel_pmd =
1829 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001830
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001831 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1832 xen_start_info->nr_pt_frames * PAGE_SIZE +
1833 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001834
1835 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001836 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001837
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001838 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001839
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001840 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1841 initial_page_table[KERNEL_PGD_BOUNDARY] =
1842 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001843
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001844 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1845 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001846 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1847
1848 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1849
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001850 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1851 PFN_DOWN(__pa(initial_page_table)));
1852 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001853
Tejun Heo24aa0782011-07-12 11:16:06 +02001854 memblock_reserve(__pa(xen_start_info->pt_base),
Konrad Rzeszutek Wilkdc6821e2012-01-07 21:27:38 -05001855 xen_start_info->nr_pt_frames * PAGE_SIZE);
Jeremy Fitzhardinge33df4db2009-05-07 11:56:44 -07001856
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001857 return initial_page_table;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001858}
1859#endif /* CONFIG_X86_64 */
1860
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001861static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1862
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07001863static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001864{
1865 pte_t pte;
1866
1867 phys >>= PAGE_SHIFT;
1868
1869 switch (idx) {
1870 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1871#ifdef CONFIG_X86_F00F_BUG
1872 case FIX_F00F_IDT:
1873#endif
1874#ifdef CONFIG_X86_32
1875 case FIX_WP_TEST:
1876 case FIX_VDSO:
1877# ifdef CONFIG_HIGHMEM
1878 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1879# endif
1880#else
1881 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04001882 case VVAR_PAGE:
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001883#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08001884 case FIX_TEXT_POKE0:
1885 case FIX_TEXT_POKE1:
1886 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001887 pte = pfn_pte(phys, prot);
1888 break;
1889
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001890#ifdef CONFIG_X86_LOCAL_APIC
1891 case FIX_APIC_BASE: /* maps dummy local APIC */
1892 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1893 break;
1894#endif
1895
1896#ifdef CONFIG_X86_IO_APIC
1897 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
1898 /*
1899 * We just don't map the IO APIC - all access is via
1900 * hypercalls. Keep the address in the pte for reference.
1901 */
1902 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1903 break;
1904#endif
1905
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001906 case FIX_PARAVIRT_BOOTMAP:
1907 /* This is an MFN, but it isn't an IO mapping from the
1908 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001909 pte = mfn_pte(phys, prot);
1910 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001911
1912 default:
1913 /* By default, set_fixmap is used for hardware mappings */
1914 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
1915 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001916 }
1917
1918 __native_set_fixmap(idx, pte);
1919
1920#ifdef CONFIG_X86_64
1921 /* Replicate changes to map the vsyscall page into the user
1922 pagetable vsyscall mapping. */
Andy Lutomirski5d5791a2011-08-03 09:31:52 -04001923 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
1924 idx == VVAR_PAGE) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001925 unsigned long vaddr = __fix_to_virt(idx);
1926 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1927 }
1928#endif
1929}
1930
Daniel Kiper3f5089532011-05-12 17:19:53 -04001931void __init xen_ident_map_ISA(void)
Juan Quintela4ec53872010-09-02 15:45:43 +01001932{
1933 unsigned long pa;
1934
1935 /*
1936 * If we're dom0, then linearly map the ISA machine addresses into
1937 * the kernel's address space.
1938 */
1939 if (!xen_initial_domain())
1940 return;
1941
1942 xen_raw_printk("Xen: setup ISA identity maps\n");
1943
1944 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1945 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1946
1947 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1948 BUG();
1949 }
1950
1951 xen_flush_tlb();
1952}
1953
Daniel Kiper3f5089532011-05-12 17:19:53 -04001954static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001955{
1956 pv_mmu_ops.set_pte = xen_set_pte;
1957 pv_mmu_ops.set_pmd = xen_set_pmd;
1958 pv_mmu_ops.set_pud = xen_set_pud;
1959#if PAGETABLE_LEVELS == 4
1960 pv_mmu_ops.set_pgd = xen_set_pgd;
1961#endif
1962
1963 /* This will work as long as patching hasn't happened yet
1964 (which it hasn't) */
1965 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1966 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1967 pv_mmu_ops.release_pte = xen_release_pte;
1968 pv_mmu_ops.release_pmd = xen_release_pmd;
1969#if PAGETABLE_LEVELS == 4
1970 pv_mmu_ops.alloc_pud = xen_alloc_pud;
1971 pv_mmu_ops.release_pud = xen_release_pud;
1972#endif
1973
1974#ifdef CONFIG_X86_64
1975 SetPagePinned(virt_to_page(level3_user_vsyscall));
1976#endif
1977 xen_mark_init_mm_pinned();
1978}
1979
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08001980static void xen_leave_lazy_mmu(void)
1981{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08001982 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08001983 xen_mc_flush();
1984 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08001985 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08001986}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001987
Daniel Kiper3f5089532011-05-12 17:19:53 -04001988static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001989 .read_cr2 = xen_read_cr2,
1990 .write_cr2 = xen_write_cr2,
1991
1992 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001993#ifdef CONFIG_X86_32
1994 .write_cr3 = xen_write_cr3_init,
1995#else
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001996 .write_cr3 = xen_write_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001997#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001998
1999 .flush_tlb_user = xen_flush_tlb,
2000 .flush_tlb_kernel = xen_flush_tlb,
2001 .flush_tlb_single = xen_flush_tlb_single,
2002 .flush_tlb_others = xen_flush_tlb_others,
2003
2004 .pte_update = paravirt_nop,
2005 .pte_update_defer = paravirt_nop,
2006
2007 .pgd_alloc = xen_pgd_alloc,
2008 .pgd_free = xen_pgd_free,
2009
2010 .alloc_pte = xen_alloc_pte_init,
2011 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002012 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002013 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002014
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002015 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002016 .set_pte_at = xen_set_pte_at,
2017 .set_pmd = xen_set_pmd_hyper,
2018
2019 .ptep_modify_prot_start = __ptep_modify_prot_start,
2020 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2021
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002022 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2023 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002024
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002025 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2026 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002027
2028#ifdef CONFIG_X86_PAE
2029 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002030 .pte_clear = xen_pte_clear,
2031 .pmd_clear = xen_pmd_clear,
2032#endif /* CONFIG_X86_PAE */
2033 .set_pud = xen_set_pud_hyper,
2034
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002035 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2036 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002037
2038#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002039 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2040 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002041 .set_pgd = xen_set_pgd_hyper,
2042
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002043 .alloc_pud = xen_alloc_pmd_init,
2044 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002045#endif /* PAGETABLE_LEVELS == 4 */
2046
2047 .activate_mm = xen_activate_mm,
2048 .dup_mmap = xen_dup_mmap,
2049 .exit_mmap = xen_exit_mmap,
2050
2051 .lazy_mode = {
2052 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002053 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002054 },
2055
2056 .set_fixmap = xen_set_fixmap,
2057};
2058
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002059void __init xen_init_mmu_ops(void)
2060{
Stefano Stabellini279b7062011-04-14 15:49:41 +01002061 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002062 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2063 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2064 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002065
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002066 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002067}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002068
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002069/* Protected by xen_reservation_lock. */
2070#define MAX_CONTIG_ORDER 9 /* 2MB */
2071static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2072
2073#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2074static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2075 unsigned long *in_frames,
2076 unsigned long *out_frames)
2077{
2078 int i;
2079 struct multicall_space mcs;
2080
2081 xen_mc_batch();
2082 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2083 mcs = __xen_mc_entry(0);
2084
2085 if (in_frames)
2086 in_frames[i] = virt_to_mfn(vaddr);
2087
2088 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002089 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002090
2091 if (out_frames)
2092 out_frames[i] = virt_to_pfn(vaddr);
2093 }
2094 xen_mc_issue(0);
2095}
2096
2097/*
2098 * Update the pfn-to-mfn mappings for a virtual address range, either to
2099 * point to an array of mfns, or contiguously from a single starting
2100 * mfn.
2101 */
2102static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2103 unsigned long *mfns,
2104 unsigned long first_mfn)
2105{
2106 unsigned i, limit;
2107 unsigned long mfn;
2108
2109 xen_mc_batch();
2110
2111 limit = 1u << order;
2112 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2113 struct multicall_space mcs;
2114 unsigned flags;
2115
2116 mcs = __xen_mc_entry(0);
2117 if (mfns)
2118 mfn = mfns[i];
2119 else
2120 mfn = first_mfn + i;
2121
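		/* Only the final update in the batch carries a flush
		   flag, so the TLB is flushed once for the whole range
		   rather than once per page. */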
2122 if (i < (limit - 1))
2123 flags = 0;
2124 else {
2125 if (order == 0)
2126 flags = UVMF_INVLPG | UVMF_ALL;
2127 else
2128 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2129 }
2130
2131 MULTI_update_va_mapping(mcs.mc, vaddr,
2132 mfn_pte(mfn, PAGE_KERNEL), flags);
2133
2134 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2135 }
2136
2137 xen_mc_issue(0);
2138}
2139
2140/*
2141 * Perform the hypercall to exchange a region of our pfns to point to
2142 * memory with the required contiguous alignment. Takes the pfns as
2143 * input, and populates mfns as output.
2144 *
2145 * Returns a success code indicating whether the hypervisor was able to
2146 * satisfy the request or not.
2147 */
2148static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2149 unsigned long *pfns_in,
2150 unsigned long extents_out,
2151 unsigned int order_out,
2152 unsigned long *mfns_out,
2153 unsigned int address_bits)
2154{
2155 long rc;
2156 int success;
2157
2158 struct xen_memory_exchange exchange = {
2159 .in = {
2160 .nr_extents = extents_in,
2161 .extent_order = order_in,
2162 .extent_start = pfns_in,
2163 .domid = DOMID_SELF
2164 },
2165 .out = {
2166 .nr_extents = extents_out,
2167 .extent_order = order_out,
2168 .extent_start = mfns_out,
2169 .address_bits = address_bits,
2170 .domid = DOMID_SELF
2171 }
2172 };
2173
2174 BUG_ON(extents_in << order_in != extents_out << order_out);
2175
2176 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2177 success = (exchange.nr_exchanged == extents_in);
2178
2179 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2180 BUG_ON(success && (rc != 0));
2181
2182 return success;
2183}
2184
2185int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2186 unsigned int address_bits)
2187{
2188 unsigned long *in_frames = discontig_frames, out_frame;
2189 unsigned long flags;
2190 int success;
2191
2192 /*
2193 * Currently an auto-translated guest will not perform I/O, nor will
2194 * it require PAE page directories below 4GB. Therefore any calls to
2195 * this function are redundant and can be ignored.
2196 */
2197
2198 if (xen_feature(XENFEAT_auto_translated_physmap))
2199 return 0;
2200
2201 if (unlikely(order > MAX_CONTIG_ORDER))
2202 return -ENOMEM;
2203
2204 memset((void *) vstart, 0, PAGE_SIZE << order);
2205
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002206 spin_lock_irqsave(&xen_reservation_lock, flags);
2207
2208 /* 1. Zap current PTEs, remembering MFNs. */
2209 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2210
2211 /* 2. Get a new contiguous memory extent. */
2212 out_frame = virt_to_pfn(vstart);
2213 success = xen_exchange_memory(1UL << order, 0, in_frames,
2214 1, order, &out_frame,
2215 address_bits);
2216
2217 /* 3. Map the new extent in place of old pages. */
2218 if (success)
2219 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2220 else
2221 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2222
2223 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2224
2225 return success ? 0 : -ENOMEM;
2226}
2227EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2228
2229void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2230{
2231 unsigned long *out_frames = discontig_frames, in_frame;
2232 unsigned long flags;
2233 int success;
2234
2235 if (xen_feature(XENFEAT_auto_translated_physmap))
2236 return;
2237
2238 if (unlikely(order > MAX_CONTIG_ORDER))
2239 return;
2240
2241 memset((void *) vstart, 0, PAGE_SIZE << order);
2242
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002243 spin_lock_irqsave(&xen_reservation_lock, flags);
2244
2245 /* 1. Find start MFN of contiguous extent. */
2246 in_frame = virt_to_mfn(vstart);
2247
2248 /* 2. Zap current PTEs. */
2249 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2250
2251 /* 3. Do the exchange for non-contiguous MFNs. */
2252 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2253 0, out_frames, 0);
2254
2255 /* 4. Map new pages in place of old pages. */
2256 if (success)
2257 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2258 else
2259 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2260
2261 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2262}
2263EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
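
/*
 * A usage sketch for the pair above: a caller needing a buffer that is
 * contiguous in machine memory (e.g. for DMA) can exchange an ordinary
 * allocation in place.  The order and address width are example values:
 */
#if 0
	unsigned int order = 2;			/* four pages */
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	/* Re-back the existing virtual range with machine-contiguous
	   frames addressable in 32 bits; on failure the range keeps
	   its original (discontiguous) frames. */
	if (xen_create_contiguous_region(vstart, order, 32) == 0) {
		/* ... use the buffer ... */
		xen_destroy_contiguous_region(vstart, order);
	}
	free_pages(vstart, order);
#endif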
2264
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002265#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002266static void xen_hvm_exit_mmap(struct mm_struct *mm)
2267{
2268 struct xen_hvm_pagetable_dying a;
2269 int rc;
2270
2271 a.domid = DOMID_SELF;
2272 a.gpa = __pa(mm->pgd);
2273 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2274 WARN_ON_ONCE(rc < 0);
2275}
2276
2277static int is_pagetable_dying_supported(void)
2278{
2279 struct xen_hvm_pagetable_dying a;
2280 int rc = 0;
2281
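	/* Probe with a dummy gpa just to learn whether the hypercall is
	   implemented; the real notification is sent from
	   xen_hvm_exit_mmap(). */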
2282 a.domid = DOMID_SELF;
2283 a.gpa = 0x00;
2284 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2285 if (rc < 0) {
2286 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2287 return 0;
2288 }
2289 return 1;
2290}
2291
2292void __init xen_hvm_init_mmu_ops(void)
2293{
2294 if (is_pagetable_dying_supported())
2295 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2296}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002297#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002298
Ian Campbellde1ef202009-05-21 10:09:46 +01002299#define REMAP_BATCH_SIZE 16
2300
2301struct remap_data {
2302 unsigned long mfn;
2303 pgprot_t prot;
2304 struct mmu_update *mmu_update;
2305};
2306
2307static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2308 unsigned long addr, void *data)
2309{
2310 struct remap_data *rmd = data;
2311 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2312
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002313 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002314 rmd->mmu_update->val = pte_val_ma(pte);
2315 rmd->mmu_update++;
2316
2317 return 0;
2318}
2319
2320int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2321 unsigned long addr,
2322 unsigned long mfn, int nr,
2323 pgprot_t prot, unsigned domid)
2324{
2325 struct remap_data rmd;
2326 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2327 int batch;
2328 unsigned long range;
2329 int err = 0;
2330
2331 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2332
Stefano Stabellinie060e7a2010-11-11 12:37:43 -08002333 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2334 (VM_PFNMAP | VM_RESERVED | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002335
2336 rmd.mfn = mfn;
2337 rmd.prot = prot;
2338
2339 while (nr) {
2340 batch = min(REMAP_BATCH_SIZE, nr);
2341 range = (unsigned long)batch << PAGE_SHIFT;
2342
2343 rmd.mmu_update = mmu_update;
2344 err = apply_to_page_range(vma->vm_mm, addr, range,
2345 remap_area_mfn_pte_fn, &rmd);
2346 if (err)
2347 goto out;
2348
2349 err = -EFAULT;
2350 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2351 goto out;
2352
2353 nr -= batch;
2354 addr += range;
2355 }
2356
2357 err = 0;
2358out:
2359
2360 flush_tlb_all();
2361
2362 return err;
2363}
2364EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
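
/*
 * A usage sketch: this is the primitive behind mapping another domain's
 * memory into a process, e.g. from a privcmd-style mmap handler.  The
 * vma must already be VM_PFNMAP | VM_RESERVED | VM_IO; mfn, nr_pages
 * and domid here are example values supplied by the caller:
 */
#if 0
	err = xen_remap_domain_mfn_range(vma, vma->vm_start,
					 mfn, nr_pages,
					 vma->vm_page_prot, domid);
#endif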