/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
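
/*
 * Illustrative sketch (not part of the original file): the pfn<->mfn
 * round trip described above, in terms of the p2m/m2p helpers this
 * file is built on.  Assumes a non-autotranslated PV guest; under
 * XENFEAT_auto_translated_physmap a pfn is its own mfn.
 *
 *	unsigned long pfn = page_to_pfn(page);
 *	unsigned long mfn = pfn_to_mfn(pfn);	   (p2m lookup)
 *	pte_t pte = mfn_pte(mfn, PAGE_KERNEL);	   (the mfn goes in the pte)
 *	BUG_ON(mfn_to_pfn(mfn) != pfn);		   (m2p is the inverse)
 */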
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

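/*
 * A minimal usage sketch (illustrative only, not from the original
 * file): which of the two values is safe to read where.
 *
 *	unsigned long own = percpu_read(xen_cr3);	      (own vcpu only)
 *	unsigned long other = per_cpu(xen_current_cr3, cpu);  (any vcpu)
 */
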
/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

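/*
 * Usage sketch (illustrative): converting a kernel virtual address to
 * a machine address, e.g. to build a hypercall argument that Xen
 * expects in machine-address terms.  Works both for lowmem (fast p2m
 * path) and for vmalloc/ioremap addresses (slow pagetable-walk path):
 *
 *	struct mmu_update u;
 *	u.ptr = arbitrary_virt_to_machine(ptep).maddr;
 */
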
void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

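/*
 * The two helpers above share a pattern: try to append the new entry
 * to the hypercall currently being batched (bumping its argument
 * count), falling back to a fresh one-element multicall entry if
 * nothing compatible is in flight.  A caller sketch (illustrative):
 *
 *	struct mmu_update u;
 *
 *	xen_mc_batch();
 *	u.ptr = virt_to_machine(ptep).maddr;
 *	u.val = pte_val_ma(pteval);
 *	xen_extend_mmu_update(&u);	    (may coalesce with earlier ones)
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);    (flushed now, or later if lazy)
 */
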
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval))
		native_set_pte(ptep, pteval);
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * Paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

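/*
 * Worked example (illustrative): why the two conversions above are
 * asymmetric.  For a pfn with no backing mfn:
 *
 *	pteval_t v = ((pteval_t)pfn << PAGE_SHIFT) | _PAGE_PRESENT;
 *	v = pte_pfn_to_mfn(v);	  (INVALID_P2M_ENTRY: v becomes 0)
 *	v = pte_mfn_to_pfn(v);	  (still 0 - the original pfn is lost)
 */
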
static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}

	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

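/*
 * Worked example (illustrative) of the WC remapping above, given the
 * PAT table earlier in this file: Linux encodes WC as PWT alone
 * (index 1), but in Xen's fixed PAT that slot is WT; Xen keeps WC at
 * index 4 (the PAT bit alone).  So, for a present WC mapping:
 *
 *	pte_t pte = xen_make_pte(val | _PAGE_PWT);   (stored as _PAGE_PAT)
 *	pteval_t v = xen_pte_val(pte);		     (read back as _PAGE_PWT)
 *
 * i.e. the make/val conversions are inverses for WC ptes.
 */
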
#ifdef CONFIG_XEN_DEBUG
pte_t xen_make_pte_debug(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
	phys_addr_t other_addr;
	bool io_page = false;
	pte_t _pte;

	if (pte & _PAGE_IOMAP)
		io_page = true;

	_pte = xen_make_pte(pte);

	if (!addr)
		return _pte;

	if (io_page &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
		WARN_ONCE(addr != other_addr,
			"0x%lx is using VM_IO, but it is 0x%lx!\n",
			(unsigned long)addr, (unsigned long)other_addr);
	} else {
		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
		other_addr = (_pte.pte & PTE_PFN_MASK);
		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
			(unsigned long)addr);
	}

	return _pte;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
#endif

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

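/*
 * Usage sketch (illustrative): the walker is driven with a per-page
 * callback, as the pinning code below does.  A non-zero return from
 * the walk means some callback requested a flush:
 *
 *	xen_mc_batch();
 *	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
 *		xen_mc_issue(0);	  (flush with interrupts enabled)
 *		kmap_flush_unused();	  (an unpinned highmem pte was seen)
 *		xen_mc_batch();
 *	}
 *	...
 *	xen_mc_issue(0);
 */
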
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

Daniel Kiper3f5089532011-05-12 17:19:53 -04001195static void __init xen_pagetable_setup_start(pgd_t *base)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001196{
1197}
1198
Stefano Stabellini279b7062011-04-14 15:49:41 +01001199static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
1200{
1201 /* reserve the range used */
1202 native_pagetable_reserve(start, end);
1203
1204 /* set as RW the rest */
1205 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1206 PFN_PHYS(pgt_buf_top));
1207 while (end < PFN_PHYS(pgt_buf_top)) {
1208 make_lowmem_page_readwrite(__va(end));
1209 end += PAGE_SIZE;
1210 }
1211}
1212
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001213static void xen_post_allocator_init(void);
1214
Daniel Kiper3f5089532011-05-12 17:19:53 -04001215static void __init xen_pagetable_setup_done(pgd_t *base)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001216{
1217 xen_setup_shared_info();
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001218 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001219}
1220
1221static void xen_write_cr2(unsigned long cr2)
1222{
1223 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1224}
1225
1226static unsigned long xen_read_cr2(void)
1227{
1228 return percpu_read(xen_vcpu)->arch.cr2;
1229}
1230
1231unsigned long xen_read_cr2_direct(void)
1232{
1233 return percpu_read(xen_vcpu_info.arch.cr2);
1234}
1235
1236static void xen_flush_tlb(void)
1237{
1238 struct mmuext_op *op;
1239 struct multicall_space mcs;
1240
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001241 trace_xen_mmu_flush_tlb(0);
1242
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001243 preempt_disable();
1244
1245 mcs = xen_mc_entry(sizeof(*op));
1246
1247 op = mcs.args;
1248 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1249 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1250
1251 xen_mc_issue(PARAVIRT_LAZY_MMU);
1252
1253 preempt_enable();
1254}
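
/*
 * Illustrative sketch, not built as part of this file: each flush op
 * above queues one entry, but the same per-cpu multicall buffer can
 * carry several ops and submit them with a single hypercall.
 * xen_mc_batch() disables interrupts, __xen_mc_entry() appends an
 * entry, and xen_mc_issue() either flushes the batch or leaves it
 * queued if we're inside a lazy-MMU section.  The helpers and
 * MMUEXT_INVLPG_LOCAL are the real ones used here;
 * example_invlpg_pair() is invented.
 *
 *	static void example_invlpg_pair(unsigned long a, unsigned long b)
 *	{
 *		struct multicall_space mcs;
 *		struct mmuext_op *op;
 *
 *		xen_mc_batch();
 *
 *		mcs = __xen_mc_entry(sizeof(*op));
 *		op = mcs.args;
 *		op->cmd = MMUEXT_INVLPG_LOCAL;
 *		op->arg1.linear_addr = a & PAGE_MASK;
 *		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *
 *		mcs = __xen_mc_entry(sizeof(*op));
 *		op = mcs.args;
 *		op->cmd = MMUEXT_INVLPG_LOCAL;
 *		op->arg1.linear_addr = b & PAGE_MASK;
 *		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *
 *		xen_mc_issue(PARAVIRT_LAZY_MMU);
 *	}
 */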
1255
1256static void xen_flush_tlb_single(unsigned long addr)
1257{
1258 struct mmuext_op *op;
1259 struct multicall_space mcs;
1260
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001261 trace_xen_mmu_flush_tlb_single(addr);
1262
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001263 preempt_disable();
1264
1265 mcs = xen_mc_entry(sizeof(*op));
1266 op = mcs.args;
1267 op->cmd = MMUEXT_INVLPG_LOCAL;
1268 op->arg1.linear_addr = addr & PAGE_MASK;
1269 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1270
1271 xen_mc_issue(PARAVIRT_LAZY_MMU);
1272
1273 preempt_enable();
1274}
1275
1276static void xen_flush_tlb_others(const struct cpumask *cpus,
1277 struct mm_struct *mm, unsigned long va)
1278{
1279 struct {
1280 struct mmuext_op op;
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001281#ifdef CONFIG_SMP
Andrew Jones900cba82009-12-18 10:31:31 +01001282 DECLARE_BITMAP(mask, num_processors);
Konrad Rzeszutek Wilk32dd1192011-06-30 09:12:40 -04001283#else
1284 DECLARE_BITMAP(mask, NR_CPUS);
1285#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001286 } *args;
1287 struct multicall_space mcs;
1288
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001289 trace_xen_mmu_flush_tlb_others(cpus, mm, va);
1290
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001291 if (cpumask_empty(cpus))
1292 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001293
1294 mcs = xen_mc_entry(sizeof(*args));
1295 args = mcs.args;
1296 args->op.arg2.vcpumask = to_cpumask(args->mask);
1297
1298 /* Remove us, and any offline CPUS. */
1299 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1300 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001301
1302 if (va == TLB_FLUSH_ALL) {
1303 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1304 } else {
1305 args->op.cmd = MMUEXT_INVLPG_MULTI;
1306 args->op.arg1.linear_addr = va;
1307 }
1308
1309 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1310
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001311 xen_mc_issue(PARAVIRT_LAZY_MMU);
1312}
1313
1314static unsigned long xen_read_cr3(void)
1315{
1316 return percpu_read(xen_cr3);
1317}
1318
1319static void set_current_cr3(void *v)
1320{
1321 percpu_write(xen_current_cr3, (unsigned long)v);
1322}
1323
1324static void __xen_write_cr3(bool kernel, unsigned long cr3)
1325{
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001326 struct mmuext_op op;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001327 unsigned long mfn;
1328
Jeremy Fitzhardingec8eed172010-12-20 13:15:04 -08001329 trace_xen_mmu_write_cr3(kernel, cr3);
1330
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001331 if (cr3)
1332 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1333 else
1334 mfn = 0;
1335
1336 WARN_ON(mfn == 0 && kernel);
1337
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001338 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1339 op.arg1.mfn = mfn;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001340
Jeremy Fitzhardingedcf74352010-12-17 09:17:32 -08001341 xen_extend_mmuext_op(&op);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001342
1343 if (kernel) {
1344 percpu_write(xen_cr3, cr3);
1345
1346 /* Update xen_current_cr3 once the batch has actually
1347 been submitted. */
1348 xen_mc_callback(set_current_cr3, (void *)cr3);
1349 }
1350}
1351
1352static void xen_write_cr3(unsigned long cr3)
1353{
1354 BUG_ON(preemptible());
1355
1356 xen_mc_batch(); /* disables interrupts */
1357
1358	/* Update while interrupts are disabled, so it's atomic with
 1359	   respect to IPIs */
1360 percpu_write(xen_cr3, cr3);
1361
1362 __xen_write_cr3(true, cr3);
1363
1364#ifdef CONFIG_X86_64
1365 {
1366 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1367 if (user_pgd)
1368 __xen_write_cr3(false, __pa(user_pgd));
1369 else
1370 __xen_write_cr3(false, 0);
1371 }
1372#endif
1373
1374 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1375}
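
/*
 * A minimal sketch (the batch shape is an illustration; the helpers
 * are real) of the xen_mc_callback() pattern used by __xen_write_cr3()
 * above.  The callback runs only once the queued batch has actually
 * been submitted, which keeps xen_current_cr3 - the value
 * xen_drop_mm_ref() trusts when deciding which cpus to IPI - from
 * getting ahead of what the hypervisor has really seen:
 *
 *	xen_mc_batch();
 *	...queue MMUEXT_NEW_BASEPTR and friends...
 *	xen_mc_callback(set_current_cr3, (void *)cr3);
 *	xen_mc_issue(PARAVIRT_LAZY_CPU);
 */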
1376
1377static int xen_pgd_alloc(struct mm_struct *mm)
1378{
1379 pgd_t *pgd = mm->pgd;
1380 int ret = 0;
1381
1382 BUG_ON(PagePinned(virt_to_page(pgd)));
1383
1384#ifdef CONFIG_X86_64
1385 {
1386 struct page *page = virt_to_page(pgd);
1387 pgd_t *user_pgd;
1388
1389 BUG_ON(page->private != 0);
1390
1391 ret = -ENOMEM;
1392
1393 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1394 page->private = (unsigned long)user_pgd;
1395
1396 if (user_pgd != NULL) {
1397 user_pgd[pgd_index(VSYSCALL_START)] =
1398 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1399 ret = 0;
1400 }
1401
1402 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1403 }
1404#endif
1405
1406 return ret;
1407}
1408
1409static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1410{
1411#ifdef CONFIG_X86_64
1412 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1413
1414 if (user_pgd)
1415 free_page((unsigned long)user_pgd);
1416#endif
1417}
1418
Stefano Stabelliniee176452011-04-19 14:47:31 +01001419#ifdef CONFIG_X86_32
Daniel Kiper3f5089532011-05-12 17:19:53 -04001420static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001421{
1422 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1423 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1424 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1425 pte_val_ma(pte));
Stefano Stabelliniee176452011-04-19 14:47:31 +01001426
1427 return pte;
1428}
1429#else /* CONFIG_X86_64 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001430static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
Stefano Stabelliniee176452011-04-19 14:47:31 +01001431{
1432 unsigned long pfn = pte_pfn(pte);
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001433
1434 /*
1435 * If the new pfn is within the range of the newly allocated
1436 * kernel pagetable, and it isn't being mapped into an
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001437 * early_ioremap fixmap slot as a freshly allocated page, make sure
1438 * it is RO.
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001439 */
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001440 if (((!is_early_ioremap_ptep(ptep) &&
Stefano Stabellinib9269dc2011-04-12 12:19:49 +01001441 pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
Stefano Stabellinid8aa5ec2011-03-09 14:22:05 +00001442 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001443 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001444
1445 return pte;
1446}
Stefano Stabelliniee176452011-04-19 14:47:31 +01001447#endif /* CONFIG_X86_64 */
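
/*
 * Worked example for the 32-bit rule above: the expression
 * ((old & _PAGE_RW) | ~_PAGE_RW) evaluates to all-ones when the old
 * pte was writable, and to all-ones-except-_PAGE_RW when it was not.
 * ANDing that into the new pte therefore leaves every other flag and
 * the pfn untouched, while letting _PAGE_RW through only if the
 * existing mapping already had it - so a page Xen has made read-only
 * can never be silently remapped read-write.
 */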
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001448
1449/* Init-time set_pte, used while constructing the initial pagetables;
 1450   it doesn't allow RO pagetable pages to be remapped RW */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001451static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001452{
1453 pte = mask_rw_pte(ptep, pte);
1454
1455 xen_set_pte(ptep, pte);
1456}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001457
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001458static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1459{
1460 struct mmuext_op op;
1461 op.cmd = cmd;
1462 op.arg1.mfn = pfn_to_mfn(pfn);
1463 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1464 BUG();
1465}
1466
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001467/* Early in boot, while setting up the initial pagetable, assume
1468 everything is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001469static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001470{
1471#ifdef CONFIG_FLATMEM
1472 BUG_ON(mem_map); /* should only be used early */
1473#endif
1474 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001475 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1476}
1477
1478/* Used for pmd and pud */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001479static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001480{
1481#ifdef CONFIG_FLATMEM
1482 BUG_ON(mem_map); /* should only be used early */
1483#endif
1484 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001485}
1486
1487/* Early release_pte assumes that all pts are pinned, since there's
1488 only init_mm and anything attached to that is pinned. */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001489static void __init xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001490{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001491 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001492 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1493}
1494
Daniel Kiper3f5089532011-05-12 17:19:53 -04001495static void __init xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001496{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001497 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001498}
1499
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001500static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1501{
1502 struct multicall_space mcs;
1503 struct mmuext_op *op;
1504
1505 mcs = __xen_mc_entry(sizeof(*op));
1506 op = mcs.args;
1507 op->cmd = cmd;
1508 op->arg1.mfn = pfn_to_mfn(pfn);
1509
1510 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1511}
1512
1513static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1514{
1515 struct multicall_space mcs;
1516 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1517
1518 mcs = __xen_mc_entry(0);
1519 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1520 pfn_pte(pfn, prot), 0);
1521}
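
/*
 * Both helpers above only queue work; they assume the caller has
 * already opened a batch.  A sketch of the intended pattern (this is
 * exactly what xen_alloc_ptpage() below does for a new pte page):
 *
 *	xen_mc_batch();
 *	__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 *	__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */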
1522
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001523/* This needs to make sure the new pte page is pinned iff it's being
 1524   attached to a pinned pagetable. */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001525static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1526 unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001527{
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001528 bool pinned = PagePinned(virt_to_page(mm->pgd));
1529
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001530 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001531
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001532 if (pinned) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001533 struct page *page = pfn_to_page(pfn);
1534
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001535 SetPagePinned(page);
1536
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001537 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001538 xen_mc_batch();
1539
1540 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1541
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001542 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001543 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1544
1545 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001546 } else {
1547 /* make sure there are no stray mappings of
1548 this page */
1549 kmap_flush_unused();
1550 }
1551 }
1552}
1553
1554static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1555{
1556 xen_alloc_ptpage(mm, pfn, PT_PTE);
1557}
1558
1559static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1560{
1561 xen_alloc_ptpage(mm, pfn, PT_PMD);
1562}
1563
1564/* This should never happen until we're OK to use struct page */
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001565static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001566{
1567 struct page *page = pfn_to_page(pfn);
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001568 bool pinned = PagePinned(page);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001569
Jeremy Fitzhardingec2ba0502010-12-17 14:21:17 -08001570 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1571
1572 if (pinned) {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001573 if (!PageHighMem(page)) {
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001574 xen_mc_batch();
1575
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001576 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
Jeremy Fitzhardingebc7fe1d2010-12-17 14:58:43 -08001577 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1578
1579 __set_pfn_prot(pfn, PAGE_KERNEL);
1580
1581 xen_mc_issue(PARAVIRT_LAZY_MMU);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001582 }
1583 ClearPagePinned(page);
1584 }
1585}
1586
1587static void xen_release_pte(unsigned long pfn)
1588{
1589 xen_release_ptpage(pfn, PT_PTE);
1590}
1591
1592static void xen_release_pmd(unsigned long pfn)
1593{
1594 xen_release_ptpage(pfn, PT_PMD);
1595}
1596
1597#if PAGETABLE_LEVELS == 4
1598static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1599{
1600 xen_alloc_ptpage(mm, pfn, PT_PUD);
1601}
1602
1603static void xen_release_pud(unsigned long pfn)
1604{
1605 xen_release_ptpage(pfn, PT_PUD);
1606}
1607#endif
1608
1609void __init xen_reserve_top(void)
1610{
1611#ifdef CONFIG_X86_32
1612 unsigned long top = HYPERVISOR_VIRT_START;
1613 struct xen_platform_parameters pp;
1614
1615 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1616 top = pp.virt_start;
1617
1618 reserve_top_address(-top);
1619#endif /* CONFIG_X86_32 */
1620}
1621
1622/*
 1623 * Like __va(), but returns address in the kernel mapping (which is
 1624 * all we have until the physical memory mapping has been set up).
 1625 */
1626static void *__ka(phys_addr_t paddr)
1627{
1628#ifdef CONFIG_X86_64
1629 return (void *)(paddr + __START_KERNEL_map);
1630#else
1631 return __va(paddr);
1632#endif
1633}
1634
1635/* Convert a machine address to physical address */
1636static unsigned long m2p(phys_addr_t maddr)
1637{
1638 phys_addr_t paddr;
1639
1640 maddr &= PTE_PFN_MASK;
1641 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1642
1643 return paddr;
1644}
1645
1646/* Convert a machine address to kernel virtual */
1647static void *m2v(phys_addr_t maddr)
1648{
1649 return __ka(m2p(maddr));
1650}
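
/*
 * Hedged example (the pmd array and index are invented): given a
 * pagetable entry holding a machine address, m2v() recovers the kernel
 * virtual address of the page it references by round-tripping through
 * the m2p table:
 *
 *	pte_t *pte_page = m2v(pmd[pmdidx].pmd);
 *
 * Per __ka() above, this only works for pages inside the kernel
 * mapping.
 */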
1651
Juan Quintela4ec53872010-09-02 15:45:43 +01001652/* Set the page permissions on identity-mapped pages */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001653static void set_page_prot(void *addr, pgprot_t prot)
1654{
1655 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1656 pte_t pte = pfn_pte(pfn, prot);
1657
1658 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1659 BUG();
1660}
1661
Daniel Kiper3f5089532011-05-12 17:19:53 -04001662static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001663{
1664 unsigned pmdidx, pteidx;
1665 unsigned ident_pte;
1666 unsigned long pfn;
1667
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001668 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1669 PAGE_SIZE);
1670
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001671 ident_pte = 0;
1672 pfn = 0;
1673 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1674 pte_t *pte_page;
1675
1676 /* Reuse or allocate a page of ptes */
1677 if (pmd_present(pmd[pmdidx]))
1678 pte_page = m2v(pmd[pmdidx].pmd);
1679 else {
1680 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001681 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001682 break;
1683
1684 pte_page = &level1_ident_pgt[ident_pte];
1685 ident_pte += PTRS_PER_PTE;
1686
1687 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1688 }
1689
1690 /* Install mappings */
1691 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1692 pte_t pte;
1693
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001694#ifdef CONFIG_X86_32
1695 if (pfn > max_pfn_mapped)
1696 max_pfn_mapped = pfn;
1697#endif
1698
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001699 if (!pte_none(pte_page[pteidx]))
1700 continue;
1701
1702 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1703 pte_page[pteidx] = pte;
1704 }
1705 }
1706
1707 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1708 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1709
1710 set_page_prot(pmd, PAGE_KERNEL_RO);
1711}
1712
Ian Campbell7e775062010-09-30 12:37:26 +01001713void __init xen_setup_machphys_mapping(void)
1714{
1715 struct xen_machphys_mapping mapping;
1716 unsigned long machine_to_phys_nr_ents;
1717
1718 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1719 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1720 machine_to_phys_nr_ents = mapping.max_mfn + 1;
1721 } else {
1722 machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
1723 }
1724 machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
1725}
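
/*
 * A rough sketch of how the table published above is consumed
 * (mfn_to_pfn() is the real consumer; the bounds test shown here is an
 * assumption for illustration): machine_to_phys_mapping is an array
 * indexed by mfn, and machine_to_phys_order bounds the valid indices.
 *
 *	if ((mfn >> machine_to_phys_order) == 0)
 *		pfn = machine_to_phys_mapping[mfn];
 */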
1726
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001727#ifdef CONFIG_X86_64
1728static void convert_pfn_mfn(void *v)
1729{
1730 pte_t *pte = v;
1731 int i;
1732
1733 /* All levels are converted the same way, so just treat them
1734 as ptes. */
1735 for (i = 0; i < PTRS_PER_PTE; i++)
1736 pte[i] = xen_make_pte(pte[i].pte);
1737}
1738
1739/*
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001740 * Set up the initial kernel pagetable.
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001741 *
1742 * We can construct this by grafting the Xen provided pagetable into
1743 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
1744 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1745 * means that only the kernel has a physical mapping to start with -
1746 * but that's enough to get __va working. We need to fill in the rest
1747 * of the physical mapping once some sort of allocator has been set
1748 * up.
1749 */
Daniel Kiper3f5089532011-05-12 17:19:53 -04001750pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001751 unsigned long max_pfn)
1752{
1753 pud_t *l3;
1754 pmd_t *l2;
1755
Stefano Stabellini14988a42011-02-18 11:32:40 +00001756	/* max_pfn_mapped is the last pfn mapped in the initial memory
 1757	 * mappings. Since on Xen the mappings following the kernel include
 1758	 * pages that don't exist in pfn space, we set max_pfn_mapped to
 1759	 * the last real pfn mapped. */
1760 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1761
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001762 /* Zap identity mapping */
1763 init_level4_pgt[0] = __pgd(0);
1764
1765 /* Pre-constructed entries are in pfn, so convert to mfn */
1766 convert_pfn_mfn(init_level4_pgt);
1767 convert_pfn_mfn(level3_ident_pgt);
1768 convert_pfn_mfn(level3_kernel_pgt);
1769
1770 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1771 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1772
1773 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1774 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1775
1776 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1777 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1778 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1779
1780 /* Set up identity map */
1781 xen_map_identity_early(level2_ident_pgt, max_pfn);
1782
1783 /* Make pagetable pieces RO */
1784 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1785 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1786 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1787 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1788 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1789 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1790
1791 /* Pin down new L4 */
1792 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1793 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1794
1795 /* Unpin Xen-provided one */
1796 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1797
1798 /* Switch over */
1799 pgd = init_level4_pgt;
1800
1801 /*
1802 * At this stage there can be no user pgd, and no page
1803	 * structure to attach it to, so make sure we just set the
 1804	 * kernel pgd.
1805 */
1806 xen_mc_batch();
1807 __xen_write_cr3(true, __pa(pgd));
1808 xen_mc_issue(PARAVIRT_LAZY_CPU);
1809
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07001810 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001811 __pa(xen_start_info->pt_base +
1812 xen_start_info->nr_pt_frames * PAGE_SIZE),
1813 "XEN PAGETABLES");
1814
1815 return pgd;
1816}
1817#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001818static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1819static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1820
Daniel Kiper3f5089532011-05-12 17:19:53 -04001821static void __init xen_write_cr3_init(unsigned long cr3)
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001822{
1823 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1824
1825 BUG_ON(read_cr3() != __pa(initial_page_table));
1826 BUG_ON(cr3 != __pa(swapper_pg_dir));
1827
1828 /*
1829 * We are switching to swapper_pg_dir for the first time (from
1830 * initial_page_table) and therefore need to mark that page
1831 * read-only and then pin it.
1832 *
1833 * Xen disallows sharing of kernel PMDs for PAE
1834 * guests. Therefore we must copy the kernel PMD from
1835 * initial_page_table into a new kernel PMD to be used in
1836 * swapper_pg_dir.
1837 */
1838 swapper_kernel_pmd =
1839 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1840 memcpy(swapper_kernel_pmd, initial_kernel_pmd,
1841 sizeof(pmd_t) * PTRS_PER_PMD);
1842 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1843 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1844 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1845
1846 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1847 xen_write_cr3(cr3);
1848 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1849
1850 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1851 PFN_DOWN(__pa(initial_page_table)));
1852 set_page_prot(initial_page_table, PAGE_KERNEL);
1853 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1854
1855 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1856}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001857
Daniel Kiper3f5089532011-05-12 17:19:53 -04001858pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001859 unsigned long max_pfn)
1860{
1861 pmd_t *kernel_pmd;
1862
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001863 initial_kernel_pmd =
1864 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001865
Stefano Stabellinia91d9282011-06-03 09:51:34 +00001866 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1867 xen_start_info->nr_pt_frames * PAGE_SIZE +
1868 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001869
1870 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001871 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001872
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001873 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001874
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001875 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1876 initial_page_table[KERNEL_PGD_BOUNDARY] =
1877 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001878
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001879 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1880 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001881 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1882
1883 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1884
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001885 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1886 PFN_DOWN(__pa(initial_page_table)));
1887 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001888
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07001889 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge33df4db2009-05-07 11:56:44 -07001890 __pa(xen_start_info->pt_base +
1891 xen_start_info->nr_pt_frames * PAGE_SIZE),
1892 "XEN PAGETABLES");
1893
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001894 return initial_page_table;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001895}
1896#endif /* CONFIG_X86_64 */
1897
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001898static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1899
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07001900static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001901{
1902 pte_t pte;
1903
1904 phys >>= PAGE_SHIFT;
1905
1906 switch (idx) {
1907 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1908#ifdef CONFIG_X86_F00F_BUG
1909 case FIX_F00F_IDT:
1910#endif
1911#ifdef CONFIG_X86_32
1912 case FIX_WP_TEST:
1913 case FIX_VDSO:
1914# ifdef CONFIG_HIGHMEM
1915 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1916# endif
1917#else
1918 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1919#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08001920 case FIX_TEXT_POKE0:
1921 case FIX_TEXT_POKE1:
1922 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001923 pte = pfn_pte(phys, prot);
1924 break;
1925
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001926#ifdef CONFIG_X86_LOCAL_APIC
1927 case FIX_APIC_BASE: /* maps dummy local APIC */
1928 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1929 break;
1930#endif
1931
1932#ifdef CONFIG_X86_IO_APIC
1933 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
1934 /*
1935 * We just don't map the IO APIC - all access is via
1936 * hypercalls. Keep the address in the pte for reference.
1937 */
1938 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1939 break;
1940#endif
1941
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001942 case FIX_PARAVIRT_BOOTMAP:
1943 /* This is an MFN, but it isn't an IO mapping from the
1944 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001945 pte = mfn_pte(phys, prot);
1946 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001947
1948 default:
1949 /* By default, set_fixmap is used for hardware mappings */
1950 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
1951 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001952 }
1953
1954 __native_set_fixmap(idx, pte);
1955
1956#ifdef CONFIG_X86_64
1957 /* Replicate changes to map the vsyscall page into the user
1958 pagetable vsyscall mapping. */
1959 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1960 unsigned long vaddr = __fix_to_virt(idx);
1961 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1962 }
1963#endif
1964}
1965
Daniel Kiper3f5089532011-05-12 17:19:53 -04001966void __init xen_ident_map_ISA(void)
Juan Quintela4ec53872010-09-02 15:45:43 +01001967{
1968 unsigned long pa;
1969
1970 /*
1971 * If we're dom0, then linear map the ISA machine addresses into
1972 * the kernel's address space.
1973 */
1974 if (!xen_initial_domain())
1975 return;
1976
1977 xen_raw_printk("Xen: setup ISA identity maps\n");
1978
1979 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1980 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1981
1982 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1983 BUG();
1984 }
1985
1986 xen_flush_tlb();
1987}
1988
Daniel Kiper3f5089532011-05-12 17:19:53 -04001989static void __init xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001990{
Konrad Rzeszutek Wilkfc251512010-12-23 16:25:29 -05001991#ifdef CONFIG_XEN_DEBUG
1992 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
1993#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001994 pv_mmu_ops.set_pte = xen_set_pte;
1995 pv_mmu_ops.set_pmd = xen_set_pmd;
1996 pv_mmu_ops.set_pud = xen_set_pud;
1997#if PAGETABLE_LEVELS == 4
1998 pv_mmu_ops.set_pgd = xen_set_pgd;
1999#endif
2000
2001 /* This will work as long as patching hasn't happened yet
2002 (which it hasn't) */
2003 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2004 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2005 pv_mmu_ops.release_pte = xen_release_pte;
2006 pv_mmu_ops.release_pmd = xen_release_pmd;
2007#if PAGETABLE_LEVELS == 4
2008 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2009 pv_mmu_ops.release_pud = xen_release_pud;
2010#endif
2011
2012#ifdef CONFIG_X86_64
2013 SetPagePinned(virt_to_page(level3_user_vsyscall));
2014#endif
2015 xen_mark_init_mm_pinned();
2016}
2017
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002018static void xen_leave_lazy_mmu(void)
2019{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002020 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002021 xen_mc_flush();
2022 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08002023 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002024}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002025
Daniel Kiper3f5089532011-05-12 17:19:53 -04002026static const struct pv_mmu_ops xen_mmu_ops __initconst = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002027 .read_cr2 = xen_read_cr2,
2028 .write_cr2 = xen_write_cr2,
2029
2030 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002031#ifdef CONFIG_X86_32
2032 .write_cr3 = xen_write_cr3_init,
2033#else
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002034 .write_cr3 = xen_write_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002035#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002036
2037 .flush_tlb_user = xen_flush_tlb,
2038 .flush_tlb_kernel = xen_flush_tlb,
2039 .flush_tlb_single = xen_flush_tlb_single,
2040 .flush_tlb_others = xen_flush_tlb_others,
2041
2042 .pte_update = paravirt_nop,
2043 .pte_update_defer = paravirt_nop,
2044
2045 .pgd_alloc = xen_pgd_alloc,
2046 .pgd_free = xen_pgd_free,
2047
2048 .alloc_pte = xen_alloc_pte_init,
2049 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002050 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002051 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002052
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002053 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002054 .set_pte_at = xen_set_pte_at,
2055 .set_pmd = xen_set_pmd_hyper,
2056
2057 .ptep_modify_prot_start = __ptep_modify_prot_start,
2058 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2059
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002060 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2061 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002062
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002063 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2064 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002065
2066#ifdef CONFIG_X86_PAE
2067 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002068 .pte_clear = xen_pte_clear,
2069 .pmd_clear = xen_pmd_clear,
2070#endif /* CONFIG_X86_PAE */
2071 .set_pud = xen_set_pud_hyper,
2072
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002073 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2074 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002075
2076#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002077 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2078 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002079 .set_pgd = xen_set_pgd_hyper,
2080
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002081 .alloc_pud = xen_alloc_pmd_init,
2082 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002083#endif /* PAGETABLE_LEVELS == 4 */
2084
2085 .activate_mm = xen_activate_mm,
2086 .dup_mmap = xen_dup_mmap,
2087 .exit_mmap = xen_exit_mmap,
2088
2089 .lazy_mode = {
2090 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002091 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002092 },
2093
2094 .set_fixmap = xen_set_fixmap,
2095};
2096
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002097void __init xen_init_mmu_ops(void)
2098{
Stefano Stabellini279b7062011-04-14 15:49:41 +01002099 x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002100 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2101 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2102 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002103
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002104 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002105}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002106
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002107/* Protected by xen_reservation_lock. */
2108#define MAX_CONTIG_ORDER 9 /* 2MB */
2109static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2110
2111#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2112static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2113 unsigned long *in_frames,
2114 unsigned long *out_frames)
2115{
2116 int i;
2117 struct multicall_space mcs;
2118
2119 xen_mc_batch();
2120 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2121 mcs = __xen_mc_entry(0);
2122
2123 if (in_frames)
2124 in_frames[i] = virt_to_mfn(vaddr);
2125
2126 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002127 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002128
2129 if (out_frames)
2130 out_frames[i] = virt_to_pfn(vaddr);
2131 }
2132 xen_mc_issue(0);
2133}
2134
2135/*
2136 * Update the pfn-to-mfn mappings for a virtual address range, either to
2137 * point to an array of mfns, or contiguously from a single starting
2138 * mfn.
2139 */
2140static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2141 unsigned long *mfns,
2142 unsigned long first_mfn)
2143{
2144 unsigned i, limit;
2145 unsigned long mfn;
2146
2147 xen_mc_batch();
2148
2149 limit = 1u << order;
2150 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2151 struct multicall_space mcs;
2152 unsigned flags;
2153
2154 mcs = __xen_mc_entry(0);
2155 if (mfns)
2156 mfn = mfns[i];
2157 else
2158 mfn = first_mfn + i;
2159
2160 if (i < (limit - 1))
2161 flags = 0;
2162 else {
2163 if (order == 0)
2164 flags = UVMF_INVLPG | UVMF_ALL;
2165 else
2166 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2167 }
2168
2169 MULTI_update_va_mapping(mcs.mc, vaddr,
2170 mfn_pte(mfn, PAGE_KERNEL), flags);
2171
2172 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2173 }
2174
2175 xen_mc_issue(0);
2176}
2177
2178/*
2179 * Perform the hypercall to exchange a region of our pfns to point to
2180 * memory with the required contiguous alignment. Takes the pfns as
2181 * input, and populates mfns as output.
2182 *
2183 * Returns a success code indicating whether the hypervisor was able to
2184 * satisfy the request or not.
2185 */
2186static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2187 unsigned long *pfns_in,
2188 unsigned long extents_out,
2189 unsigned int order_out,
2190 unsigned long *mfns_out,
2191 unsigned int address_bits)
2192{
2193 long rc;
2194 int success;
2195
2196 struct xen_memory_exchange exchange = {
2197 .in = {
2198 .nr_extents = extents_in,
2199 .extent_order = order_in,
2200 .extent_start = pfns_in,
2201 .domid = DOMID_SELF
2202 },
2203 .out = {
2204 .nr_extents = extents_out,
2205 .extent_order = order_out,
2206 .extent_start = mfns_out,
2207 .address_bits = address_bits,
2208 .domid = DOMID_SELF
2209 }
2210 };
2211
2212 BUG_ON(extents_in << order_in != extents_out << order_out);
2213
2214 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2215 success = (exchange.nr_exchanged == extents_in);
2216
2217 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2218 BUG_ON(success && (rc != 0));
2219
2220 return success;
2221}
2222
2223int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2224 unsigned int address_bits)
2225{
2226 unsigned long *in_frames = discontig_frames, out_frame;
2227 unsigned long flags;
2228 int success;
2229
2230 /*
2231 * Currently an auto-translated guest will not perform I/O, nor will
2232 * it require PAE page directories below 4GB. Therefore any calls to
2233 * this function are redundant and can be ignored.
2234 */
2235
2236 if (xen_feature(XENFEAT_auto_translated_physmap))
2237 return 0;
2238
2239 if (unlikely(order > MAX_CONTIG_ORDER))
2240 return -ENOMEM;
2241
2242 memset((void *) vstart, 0, PAGE_SIZE << order);
2243
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002244 spin_lock_irqsave(&xen_reservation_lock, flags);
2245
2246 /* 1. Zap current PTEs, remembering MFNs. */
2247 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2248
2249 /* 2. Get a new contiguous memory extent. */
2250 out_frame = virt_to_pfn(vstart);
2251 success = xen_exchange_memory(1UL << order, 0, in_frames,
2252 1, order, &out_frame,
2253 address_bits);
2254
2255 /* 3. Map the new extent in place of old pages. */
2256 if (success)
2257 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2258 else
2259 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2260
2261 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2262
2263 return success ? 0 : -ENOMEM;
2264}
2265EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
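
/*
 * Hedged usage sketch (xen_create_contiguous_region() is real; the
 * caller, order and address width are invented): a driver needing four
 * machine-contiguous pages addressable with 32 bits, e.g. for a
 * descriptor ring, could do:
 *
 *	unsigned long vstart = __get_free_pages(GFP_KERNEL, 2);
 *
 *	if (vstart && xen_create_contiguous_region(vstart, 2, 32) == 0) {
 *		...hand virt_to_machine(vstart) to the device...
 *		xen_destroy_contiguous_region(vstart, 2);
 *		free_pages(vstart, 2);
 *	}
 */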
2266
2267void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2268{
2269 unsigned long *out_frames = discontig_frames, in_frame;
2270 unsigned long flags;
2271 int success;
2272
2273 if (xen_feature(XENFEAT_auto_translated_physmap))
2274 return;
2275
2276 if (unlikely(order > MAX_CONTIG_ORDER))
2277 return;
2278
2279 memset((void *) vstart, 0, PAGE_SIZE << order);
2280
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002281 spin_lock_irqsave(&xen_reservation_lock, flags);
2282
2283 /* 1. Find start MFN of contiguous extent. */
2284 in_frame = virt_to_mfn(vstart);
2285
2286 /* 2. Zap current PTEs. */
2287 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2288
2289 /* 3. Do the exchange for non-contiguous MFNs. */
2290 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2291 0, out_frames, 0);
2292
2293 /* 4. Map new pages in place of old pages. */
2294 if (success)
2295 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2296 else
2297 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2298
2299 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2300}
2301EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2302
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002303#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002304static void xen_hvm_exit_mmap(struct mm_struct *mm)
2305{
2306 struct xen_hvm_pagetable_dying a;
2307 int rc;
2308
2309 a.domid = DOMID_SELF;
2310 a.gpa = __pa(mm->pgd);
2311 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2312 WARN_ON_ONCE(rc < 0);
2313}
2314
2315static int is_pagetable_dying_supported(void)
2316{
2317 struct xen_hvm_pagetable_dying a;
2318 int rc = 0;
2319
2320 a.domid = DOMID_SELF;
2321 a.gpa = 0x00;
2322 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2323 if (rc < 0) {
2324 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2325 return 0;
2326 }
2327 return 1;
2328}
2329
2330void __init xen_hvm_init_mmu_ops(void)
2331{
2332 if (is_pagetable_dying_supported())
2333 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2334}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002335#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002336
Ian Campbellde1ef202009-05-21 10:09:46 +01002337#define REMAP_BATCH_SIZE 16
2338
2339struct remap_data {
2340 unsigned long mfn;
2341 pgprot_t prot;
2342 struct mmu_update *mmu_update;
2343};
2344
2345static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2346 unsigned long addr, void *data)
2347{
2348 struct remap_data *rmd = data;
2349 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2350
Jeremy Fitzhardinged5108312010-12-22 13:09:40 -08002351 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
Ian Campbellde1ef202009-05-21 10:09:46 +01002352 rmd->mmu_update->val = pte_val_ma(pte);
2353 rmd->mmu_update++;
2354
2355 return 0;
2356}
2357
2358int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2359 unsigned long addr,
2360 unsigned long mfn, int nr,
2361 pgprot_t prot, unsigned domid)
2362{
2363 struct remap_data rmd;
2364 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2365 int batch;
2366 unsigned long range;
2367 int err = 0;
2368
2369 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2370
Stefano Stabellinie060e7af2010-11-11 12:37:43 -08002371 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2372 (VM_PFNMAP | VM_RESERVED | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002373
2374 rmd.mfn = mfn;
2375 rmd.prot = prot;
2376
2377 while (nr) {
2378 batch = min(REMAP_BATCH_SIZE, nr);
2379 range = (unsigned long)batch << PAGE_SHIFT;
2380
2381 rmd.mmu_update = mmu_update;
2382 err = apply_to_page_range(vma->vm_mm, addr, range,
2383 remap_area_mfn_pte_fn, &rmd);
2384 if (err)
2385 goto out;
2386
2387 err = -EFAULT;
2388 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2389 goto out;
2390
2391 nr -= batch;
2392 addr += range;
2393 }
2394
2395 err = 0;
2396out:
2397
2398 flush_tlb_all();
2399
2400 return err;
2401}
2402EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
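
/*
 * Hedged usage sketch, modelled on a privcmd-style caller (the vma,
 * mfn, nr and domid values are assumed to come from userspace): the
 * vma must already be marked VM_PFNMAP | VM_RESERVED | VM_IO, per the
 * BUG_ON above.
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start,
 *					 mfn, nr,
 *					 vma->vm_page_prot, domid);
 */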
2403
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07002404#ifdef CONFIG_XEN_DEBUG_FS
Konrad Rzeszutek Wilk2222e712010-12-22 08:57:30 -05002405static int p2m_dump_open(struct inode *inode, struct file *filp)
2406{
2407 return single_open(filp, p2m_dump_show, NULL);
2408}
2409
2410static const struct file_operations p2m_dump_fops = {
2411 .open = p2m_dump_open,
2412 .read = seq_read,
2413 .llseek = seq_lseek,
2414 .release = single_release,
2415};
Jeremy Fitzhardinge4bf0ff22011-05-20 16:34:44 -07002416#endif /* CONFIG_XEN_DEBUG_FS */