/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
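/*
 * Illustrative sketch (not part of the build): a pte the guest hands
 * to Xen is composed roughly as
 *
 *	mfn = pfn_to_mfn(pfn);
 *	pte = (mfn << PAGE_SHIFT) | flags;
 *
 * with the reverse translation applied when the value is read back.
 * The real conversions are pte_pfn_to_mfn() and pte_mfn_to_pfn()
 * below, which also handle missing p2m entries and identity frames.
 */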
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and driver_pages, and
 * balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)
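/*
 * Note: these counters are updated without any locking, so when several
 * CPUs update page tables concurrently the figures are only approximate;
 * they are debug statistics rather than exact counts.
 */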

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	xen_set_domain_pte(ptep, pteval, DOMID_IO);
}

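/*
 * Queue an mmu_update for the hypervisor: extend the multicall batch's
 * pending __HYPERVISOR_mmu_update call if there is one, otherwise start
 * a new single-update call.
 */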
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * This test must be done _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is an MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}

	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT to be set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT      PWT     WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

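/*
 * A sketch of the check in xen_set_pat() below, assuming the standard
 * x86 PAT encodings (UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7): the expected
 * MSR value 0x0007010600070106 decodes, from entry 0 upwards, as
 * WB, WC, UC-, UC, WB, WC, UC-, UC, i.e. the "UC UC- WC WB" layout
 * noted below with the PAT bit ignored, matching the Linux column of
 * the table above.
 */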
void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot, before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after an mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while
		 * the pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07001240
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001241static __init void xen_pagetable_setup_start(pgd_t *base)
1242{
1243}
1244
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001245static void xen_post_allocator_init(void);
1246
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001247static __init void xen_pagetable_setup_done(pgd_t *base)
1248{
1249 xen_setup_shared_info();
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001250 xen_post_allocator_init();
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001251}
1252
1253static void xen_write_cr2(unsigned long cr2)
1254{
1255 percpu_read(xen_vcpu)->arch.cr2 = cr2;
1256}
1257
1258static unsigned long xen_read_cr2(void)
1259{
1260 return percpu_read(xen_vcpu)->arch.cr2;
1261}
1262
1263unsigned long xen_read_cr2_direct(void)
1264{
1265 return percpu_read(xen_vcpu_info.arch.cr2);
1266}
1267
1268static void xen_flush_tlb(void)
1269{
1270 struct mmuext_op *op;
1271 struct multicall_space mcs;
1272
1273 preempt_disable();
1274
1275 mcs = xen_mc_entry(sizeof(*op));
1276
1277 op = mcs.args;
1278 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1279 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1280
1281 xen_mc_issue(PARAVIRT_LAZY_MMU);
1282
1283 preempt_enable();
1284}
1285
1286static void xen_flush_tlb_single(unsigned long addr)
1287{
1288 struct mmuext_op *op;
1289 struct multicall_space mcs;
1290
1291 preempt_disable();
1292
1293 mcs = xen_mc_entry(sizeof(*op));
1294 op = mcs.args;
1295 op->cmd = MMUEXT_INVLPG_LOCAL;
1296 op->arg1.linear_addr = addr & PAGE_MASK;
1297 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1298
1299 xen_mc_issue(PARAVIRT_LAZY_MMU);
1300
1301 preempt_enable();
1302}
1303
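/*
 * Editorial sketch, not part of the original file: the two flush
 * helpers above follow the multicall pattern used throughout this
 * file.  Several MMU operations can also share one explicit batch, as
 * a hypothetical caller might do:
 *
 *	xen_mc_batch();
 *	... queue ops via __xen_mc_entry() and the MULTI_*() wrappers ...
 *	xen_mc_issue(0);
 *
 * xen_zap_pfn_range() near the end of this file uses exactly this
 * structure.
 */
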
1304static void xen_flush_tlb_others(const struct cpumask *cpus,
1305 struct mm_struct *mm, unsigned long va)
1306{
1307 struct {
1308 struct mmuext_op op;
1309 DECLARE_BITMAP(mask, NR_CPUS);
1310 } *args;
1311 struct multicall_space mcs;
1312
Jeremy Fitzhardingee3f8a742009-03-04 17:36:57 -08001313 if (cpumask_empty(cpus))
1314 return; /* nothing to do */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001315
1316 mcs = xen_mc_entry(sizeof(*args));
1317 args = mcs.args;
1318 args->op.arg2.vcpumask = to_cpumask(args->mask);
1319
 1320 /* Remove us, and any offline CPUs. */
1321 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1322 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001323
1324 if (va == TLB_FLUSH_ALL) {
1325 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1326 } else {
1327 args->op.cmd = MMUEXT_INVLPG_MULTI;
1328 args->op.arg1.linear_addr = va;
1329 }
1330
1331 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1332
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001333 xen_mc_issue(PARAVIRT_LAZY_MMU);
1334}
1335
1336static unsigned long xen_read_cr3(void)
1337{
1338 return percpu_read(xen_cr3);
1339}
1340
1341static void set_current_cr3(void *v)
1342{
1343 percpu_write(xen_current_cr3, (unsigned long)v);
1344}
1345
1346static void __xen_write_cr3(bool kernel, unsigned long cr3)
1347{
1348 struct mmuext_op *op;
1349 struct multicall_space mcs;
1350 unsigned long mfn;
1351
1352 if (cr3)
1353 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1354 else
1355 mfn = 0;
1356
1357 WARN_ON(mfn == 0 && kernel);
1358
1359 mcs = __xen_mc_entry(sizeof(*op));
1360
1361 op = mcs.args;
1362 op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1363 op->arg1.mfn = mfn;
1364
1365 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1366
1367 if (kernel) {
1368 percpu_write(xen_cr3, cr3);
1369
1370 /* Update xen_current_cr3 once the batch has actually
1371 been submitted. */
1372 xen_mc_callback(set_current_cr3, (void *)cr3);
1373 }
1374}
1375
1376static void xen_write_cr3(unsigned long cr3)
1377{
1378 BUG_ON(preemptible());
1379
1380 xen_mc_batch(); /* disables interrupts */
1381
 1382 /* Update while interrupts are disabled, so it's atomic with
 1383 respect to IPIs */
1384 percpu_write(xen_cr3, cr3);
1385
1386 __xen_write_cr3(true, cr3);
1387
1388#ifdef CONFIG_X86_64
1389 {
1390 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1391 if (user_pgd)
1392 __xen_write_cr3(false, __pa(user_pgd));
1393 else
1394 __xen_write_cr3(false, 0);
1395 }
1396#endif
1397
1398 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
1399}
1400
1401static int xen_pgd_alloc(struct mm_struct *mm)
1402{
1403 pgd_t *pgd = mm->pgd;
1404 int ret = 0;
1405
1406 BUG_ON(PagePinned(virt_to_page(pgd)));
1407
1408#ifdef CONFIG_X86_64
1409 {
1410 struct page *page = virt_to_page(pgd);
1411 pgd_t *user_pgd;
1412
1413 BUG_ON(page->private != 0);
1414
1415 ret = -ENOMEM;
1416
1417 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1418 page->private = (unsigned long)user_pgd;
1419
1420 if (user_pgd != NULL) {
1421 user_pgd[pgd_index(VSYSCALL_START)] =
1422 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1423 ret = 0;
1424 }
1425
1426 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1427 }
1428#endif
1429
1430 return ret;
1431}
1432
1433static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1434{
1435#ifdef CONFIG_X86_64
1436 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1437
1438 if (user_pgd)
1439 free_page((unsigned long)user_pgd);
1440#endif
1441}
1442
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001443static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1444{
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001445 unsigned long pfn = pte_pfn(pte);
1446
1447#ifdef CONFIG_X86_32
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001448 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1449 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1450 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1451 pte_val_ma(pte));
Jeremy Fitzhardingefef5ba72010-10-13 16:02:24 -07001452#endif
1453
1454 /*
1455 * If the new pfn is within the range of the newly allocated
1456 * kernel pagetable, and it isn't being mapped into an
1457 * early_ioremap fixmap slot, make sure it is RO.
1458 */
1459 if (!is_early_ioremap_ptep(ptep) &&
1460 pfn >= e820_table_start && pfn < e820_table_end)
1461 pte = pte_wrprotect(pte);
Jeremy Fitzhardinge1f4f9312009-02-02 13:58:06 -08001462
1463 return pte;
1464}
1465
 1466/* Init-time set_pte while constructing initial pagetables; it
 1467 doesn't allow RO pagetable pages to be remapped RW */
1468static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1469{
1470 pte = mask_rw_pte(ptep, pte);
1471
1472 xen_set_pte(ptep, pte);
1473}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001474
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001475static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1476{
1477 struct mmuext_op op;
1478 op.cmd = cmd;
1479 op.arg1.mfn = pfn_to_mfn(pfn);
1480 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1481 BUG();
1482}
1483
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001484/* Early in boot, while setting up the initial pagetable, assume
1485 everything is pinned. */
1486static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1487{
1488#ifdef CONFIG_FLATMEM
1489 BUG_ON(mem_map); /* should only be used early */
1490#endif
1491 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001492 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1493}
1494
1495/* Used for pmd and pud */
1496static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1497{
1498#ifdef CONFIG_FLATMEM
1499 BUG_ON(mem_map); /* should only be used early */
1500#endif
1501 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001502}
1503
1504/* Early release_pte assumes that all pts are pinned, since there's
1505 only init_mm and anything attached to that is pinned. */
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001506static __init void xen_release_pte_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001507{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001508 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001509 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1510}
1511
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001512static __init void xen_release_pmd_init(unsigned long pfn)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001513{
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07001514 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001515}
1516
 1517/* This needs to make sure the new pte page is pinned iff it's being
1518 attached to a pinned pagetable. */
1519static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1520{
1521 struct page *page = pfn_to_page(pfn);
1522
1523 if (PagePinned(virt_to_page(mm->pgd))) {
1524 SetPagePinned(page);
1525
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001526 if (!PageHighMem(page)) {
1527 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1528 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1529 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1530 } else {
1531 /* make sure there are no stray mappings of
1532 this page */
1533 kmap_flush_unused();
1534 }
1535 }
1536}
1537
1538static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1539{
1540 xen_alloc_ptpage(mm, pfn, PT_PTE);
1541}
1542
1543static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1544{
1545 xen_alloc_ptpage(mm, pfn, PT_PMD);
1546}
1547
 1548/* This should never be called until we're OK to use struct page */
1549static void xen_release_ptpage(unsigned long pfn, unsigned level)
1550{
1551 struct page *page = pfn_to_page(pfn);
1552
1553 if (PagePinned(page)) {
1554 if (!PageHighMem(page)) {
1555 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1556 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1557 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1558 }
1559 ClearPagePinned(page);
1560 }
1561}
1562
1563static void xen_release_pte(unsigned long pfn)
1564{
1565 xen_release_ptpage(pfn, PT_PTE);
1566}
1567
1568static void xen_release_pmd(unsigned long pfn)
1569{
1570 xen_release_ptpage(pfn, PT_PMD);
1571}
1572
1573#if PAGETABLE_LEVELS == 4
1574static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1575{
1576 xen_alloc_ptpage(mm, pfn, PT_PUD);
1577}
1578
1579static void xen_release_pud(unsigned long pfn)
1580{
1581 xen_release_ptpage(pfn, PT_PUD);
1582}
1583#endif
1584
1585void __init xen_reserve_top(void)
1586{
1587#ifdef CONFIG_X86_32
1588 unsigned long top = HYPERVISOR_VIRT_START;
1589 struct xen_platform_parameters pp;
1590
1591 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1592 top = pp.virt_start;
1593
1594 reserve_top_address(-top);
1595#endif /* CONFIG_X86_32 */
1596}
1597
1598/*
1599 * Like __va(), but returns address in the kernel mapping (which is
 1600 * all we have until the physical memory mapping has been set up).
1601 */
1602static void *__ka(phys_addr_t paddr)
1603{
1604#ifdef CONFIG_X86_64
1605 return (void *)(paddr + __START_KERNEL_map);
1606#else
1607 return __va(paddr);
1608#endif
1609}
1610
1611/* Convert a machine address to physical address */
1612static unsigned long m2p(phys_addr_t maddr)
1613{
1614 phys_addr_t paddr;
1615
1616 maddr &= PTE_PFN_MASK;
1617 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1618
1619 return paddr;
1620}
1621
1622/* Convert a machine address to kernel virtual */
1623static void *m2v(phys_addr_t maddr)
1624{
1625 return __ka(m2p(maddr));
1626}
1627
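/*
 * Editorial sketch, not part of the original file: m2p()/m2v() are how
 * the boot code below walks the Xen-provided pagetable, whose entries
 * hold machine addresses.  For a pgd entry e,
 *
 *	pud_t *l3 = m2v(e.pgd);
 *
 * masks off the flag bits with PTE_PFN_MASK, converts the mfn to a pfn
 * through mfn_to_pfn(), and returns the kernel-mapping address of that
 * frame (assuming the frame is covered by the kernel mapping).
 */
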
Juan Quintela4ec53872010-09-02 15:45:43 +01001628/* Set the page permissions on an identity-mapped page */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001629static void set_page_prot(void *addr, pgprot_t prot)
1630{
1631 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1632 pte_t pte = pfn_pte(pfn, prot);
1633
1634 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1635 BUG();
1636}
1637
1638static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1639{
1640 unsigned pmdidx, pteidx;
1641 unsigned ident_pte;
1642 unsigned long pfn;
1643
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001644 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1645 PAGE_SIZE);
1646
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001647 ident_pte = 0;
1648 pfn = 0;
1649 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1650 pte_t *pte_page;
1651
1652 /* Reuse or allocate a page of ptes */
1653 if (pmd_present(pmd[pmdidx]))
1654 pte_page = m2v(pmd[pmdidx].pmd);
1655 else {
1656 /* Check for free pte pages */
Jeremy Fitzhardinge764f01382010-08-26 16:23:51 -07001657 if (ident_pte == LEVEL1_IDENT_ENTRIES)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001658 break;
1659
1660 pte_page = &level1_ident_pgt[ident_pte];
1661 ident_pte += PTRS_PER_PTE;
1662
1663 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1664 }
1665
1666 /* Install mappings */
1667 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1668 pte_t pte;
1669
1670 if (pfn > max_pfn_mapped)
1671 max_pfn_mapped = pfn;
1672
1673 if (!pte_none(pte_page[pteidx]))
1674 continue;
1675
1676 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1677 pte_page[pteidx] = pte;
1678 }
1679 }
1680
1681 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1682 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1683
1684 set_page_prot(pmd, PAGE_KERNEL_RO);
1685}
1686
Ian Campbell7e775062010-09-30 12:37:26 +01001687void __init xen_setup_machphys_mapping(void)
1688{
1689 struct xen_machphys_mapping mapping;
1690 unsigned long machine_to_phys_nr_ents;
1691
1692 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1693 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1694 machine_to_phys_nr_ents = mapping.max_mfn + 1;
1695 } else {
1696 machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
1697 }
1698 machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
1699}
1700
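/*
 * Editorial note, not part of the original file: once the hypervisor's
 * machine-to-physical table has been located above, translating a
 * machine frame back to a pseudo-physical frame is essentially an
 * array lookup; simplified, and ignoring range checks and
 * auto-translated guests:
 *
 *	pfn = machine_to_phys_mapping[mfn];
 *
 * which is what mfn_to_pfn(), used by m2p() above, boils down to.
 */
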
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001701#ifdef CONFIG_X86_64
1702static void convert_pfn_mfn(void *v)
1703{
1704 pte_t *pte = v;
1705 int i;
1706
1707 /* All levels are converted the same way, so just treat them
1708 as ptes. */
1709 for (i = 0; i < PTRS_PER_PTE; i++)
1710 pte[i] = xen_make_pte(pte[i].pte);
1711}
1712
1713/*
 1714 * Set up the initial kernel pagetable.
1715 *
 1716 * We can construct this by grafting the Xen-provided pagetable into
 1717 * head_64.S's preconstructed pagetables. We copy the Xen L2s into
1718 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
1719 * means that only the kernel has a physical mapping to start with -
1720 * but that's enough to get __va working. We need to fill in the rest
1721 * of the physical mapping once some sort of allocator has been set
1722 * up.
1723 */
1724__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1725 unsigned long max_pfn)
1726{
1727 pud_t *l3;
1728 pmd_t *l2;
1729
1730 /* Zap identity mapping */
1731 init_level4_pgt[0] = __pgd(0);
1732
1733 /* Pre-constructed entries are in pfn, so convert to mfn */
1734 convert_pfn_mfn(init_level4_pgt);
1735 convert_pfn_mfn(level3_ident_pgt);
1736 convert_pfn_mfn(level3_kernel_pgt);
1737
1738 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1739 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1740
1741 memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1742 memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1743
1744 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1745 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1746 memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1747
1748 /* Set up identity map */
1749 xen_map_identity_early(level2_ident_pgt, max_pfn);
1750
1751 /* Make pagetable pieces RO */
1752 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1753 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1754 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1755 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1756 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1757 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1758
1759 /* Pin down new L4 */
1760 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1761 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1762
1763 /* Unpin Xen-provided one */
1764 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1765
1766 /* Switch over */
1767 pgd = init_level4_pgt;
1768
1769 /*
1770 * At this stage there can be no user pgd, and no page
 1771 * structure to attach it to, so make sure we just set the
 1772 * kernel pgd.
1773 */
1774 xen_mc_batch();
1775 __xen_write_cr3(true, __pa(pgd));
1776 xen_mc_issue(PARAVIRT_LAZY_CPU);
1777
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07001778 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001779 __pa(xen_start_info->pt_base +
1780 xen_start_info->nr_pt_frames * PAGE_SIZE),
1781 "XEN PAGETABLES");
1782
1783 return pgd;
1784}
1785#else /* !CONFIG_X86_64 */
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001786static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1787static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1788
1789static __init void xen_write_cr3_init(unsigned long cr3)
1790{
1791 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1792
1793 BUG_ON(read_cr3() != __pa(initial_page_table));
1794 BUG_ON(cr3 != __pa(swapper_pg_dir));
1795
1796 /*
1797 * We are switching to swapper_pg_dir for the first time (from
1798 * initial_page_table) and therefore need to mark that page
1799 * read-only and then pin it.
1800 *
1801 * Xen disallows sharing of kernel PMDs for PAE
1802 * guests. Therefore we must copy the kernel PMD from
1803 * initial_page_table into a new kernel PMD to be used in
1804 * swapper_pg_dir.
1805 */
1806 swapper_kernel_pmd =
1807 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1808 memcpy(swapper_kernel_pmd, initial_kernel_pmd,
1809 sizeof(pmd_t) * PTRS_PER_PMD);
1810 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1811 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1812 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1813
1814 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1815 xen_write_cr3(cr3);
1816 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1817
1818 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1819 PFN_DOWN(__pa(initial_page_table)));
1820 set_page_prot(initial_page_table, PAGE_KERNEL);
1821 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1822
1823 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1824}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001825
1826__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1827 unsigned long max_pfn)
1828{
1829 pmd_t *kernel_pmd;
1830
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001831 initial_kernel_pmd =
1832 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
Jeremy Fitzhardingef0991802010-08-26 16:16:28 -07001833
Jeremy Fitzhardinge93dbda72009-02-26 17:35:44 -08001834 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1835 xen_start_info->nr_pt_frames * PAGE_SIZE +
1836 512*1024);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001837
1838 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001839 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001840
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001841 xen_map_identity_early(initial_kernel_pmd, max_pfn);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001842
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001843 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1844 initial_page_table[KERNEL_PGD_BOUNDARY] =
1845 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001846
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001847 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
1848 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001849 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1850
1851 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1852
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001853 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1854 PFN_DOWN(__pa(initial_page_table)));
1855 xen_write_cr3(__pa(initial_page_table));
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001856
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07001857 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
Jeremy Fitzhardinge33df4db2009-05-07 11:56:44 -07001858 __pa(xen_start_info->pt_base +
1859 xen_start_info->nr_pt_frames * PAGE_SIZE),
1860 "XEN PAGETABLES");
1861
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001862 return initial_page_table;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001863}
1864#endif /* CONFIG_X86_64 */
1865
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001866static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
1867
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -07001868static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001869{
1870 pte_t pte;
1871
1872 phys >>= PAGE_SHIFT;
1873
1874 switch (idx) {
1875 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1876#ifdef CONFIG_X86_F00F_BUG
1877 case FIX_F00F_IDT:
1878#endif
1879#ifdef CONFIG_X86_32
1880 case FIX_WP_TEST:
1881 case FIX_VDSO:
1882# ifdef CONFIG_HIGHMEM
1883 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1884# endif
1885#else
1886 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1887#endif
Jeremy Fitzhardinge3ecb1b72009-03-07 23:48:41 -08001888 case FIX_TEXT_POKE0:
1889 case FIX_TEXT_POKE1:
1890 /* All local page mappings */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001891 pte = pfn_pte(phys, prot);
1892 break;
1893
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01001894#ifdef CONFIG_X86_LOCAL_APIC
1895 case FIX_APIC_BASE: /* maps dummy local APIC */
1896 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1897 break;
1898#endif
1899
1900#ifdef CONFIG_X86_IO_APIC
1901 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
1902 /*
1903 * We just don't map the IO APIC - all access is via
1904 * hypercalls. Keep the address in the pte for reference.
1905 */
1906 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
1907 break;
1908#endif
1909
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001910 case FIX_PARAVIRT_BOOTMAP:
1911 /* This is an MFN, but it isn't an IO mapping from the
1912 IO domain */
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001913 pte = mfn_pte(phys, prot);
1914 break;
Jeremy Fitzhardingec0011db2010-02-04 14:46:34 -08001915
1916 default:
1917 /* By default, set_fixmap is used for hardware mappings */
1918 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
1919 break;
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001920 }
1921
1922 __native_set_fixmap(idx, pte);
1923
1924#ifdef CONFIG_X86_64
1925 /* Replicate changes to map the vsyscall page into the user
1926 pagetable vsyscall mapping. */
1927 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1928 unsigned long vaddr = __fix_to_virt(idx);
1929 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1930 }
1931#endif
1932}
1933
Juan Quintela4ec53872010-09-02 15:45:43 +01001934__init void xen_ident_map_ISA(void)
1935{
1936 unsigned long pa;
1937
1938 /*
1939 * If we're dom0, then linear map the ISA machine addresses into
1940 * the kernel's address space.
1941 */
1942 if (!xen_initial_domain())
1943 return;
1944
1945 xen_raw_printk("Xen: setup ISA identity maps\n");
1946
1947 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1948 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1949
1950 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1951 BUG();
1952 }
1953
1954 xen_flush_tlb();
1955}
1956
Thomas Gleixnerf1d70622009-08-20 13:13:52 +02001957static __init void xen_post_allocator_init(void)
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001958{
1959 pv_mmu_ops.set_pte = xen_set_pte;
1960 pv_mmu_ops.set_pmd = xen_set_pmd;
1961 pv_mmu_ops.set_pud = xen_set_pud;
1962#if PAGETABLE_LEVELS == 4
1963 pv_mmu_ops.set_pgd = xen_set_pgd;
1964#endif
1965
1966 /* This will work as long as patching hasn't happened yet
1967 (which it hasn't) */
1968 pv_mmu_ops.alloc_pte = xen_alloc_pte;
1969 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1970 pv_mmu_ops.release_pte = xen_release_pte;
1971 pv_mmu_ops.release_pmd = xen_release_pmd;
1972#if PAGETABLE_LEVELS == 4
1973 pv_mmu_ops.alloc_pud = xen_alloc_pud;
1974 pv_mmu_ops.release_pud = xen_release_pud;
1975#endif
1976
1977#ifdef CONFIG_X86_64
1978 SetPagePinned(virt_to_page(level3_user_vsyscall));
1979#endif
1980 xen_mark_init_mm_pinned();
1981}
1982
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08001983static void xen_leave_lazy_mmu(void)
1984{
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08001985 preempt_disable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08001986 xen_mc_flush();
1987 paravirt_leave_lazy_mmu();
Jeremy Fitzhardinge5caecb92009-02-20 23:01:26 -08001988 preempt_enable();
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08001989}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001990
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02001991static const struct pv_mmu_ops xen_mmu_ops __initdata = {
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001992 .read_cr2 = xen_read_cr2,
1993 .write_cr2 = xen_write_cr2,
1994
1995 .read_cr3 = xen_read_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00001996#ifdef CONFIG_X86_32
1997 .write_cr3 = xen_write_cr3_init,
1998#else
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08001999 .write_cr3 = xen_write_cr3,
Ian Campbell5b5c1af2010-11-24 12:09:41 +00002000#endif
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002001
2002 .flush_tlb_user = xen_flush_tlb,
2003 .flush_tlb_kernel = xen_flush_tlb,
2004 .flush_tlb_single = xen_flush_tlb_single,
2005 .flush_tlb_others = xen_flush_tlb_others,
2006
2007 .pte_update = paravirt_nop,
2008 .pte_update_defer = paravirt_nop,
2009
2010 .pgd_alloc = xen_pgd_alloc,
2011 .pgd_free = xen_pgd_free,
2012
2013 .alloc_pte = xen_alloc_pte_init,
2014 .release_pte = xen_release_pte_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002015 .alloc_pmd = xen_alloc_pmd_init,
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002016 .release_pmd = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002017
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002018 .set_pte = xen_set_pte_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002019 .set_pte_at = xen_set_pte_at,
2020 .set_pmd = xen_set_pmd_hyper,
2021
2022 .ptep_modify_prot_start = __ptep_modify_prot_start,
2023 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2024
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002025 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2026 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002027
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002028 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2029 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002030
2031#ifdef CONFIG_X86_PAE
2032 .set_pte_atomic = xen_set_pte_atomic,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002033 .pte_clear = xen_pte_clear,
2034 .pmd_clear = xen_pmd_clear,
2035#endif /* CONFIG_X86_PAE */
2036 .set_pud = xen_set_pud_hyper,
2037
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002038 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2039 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002040
2041#if PAGETABLE_LEVELS == 4
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -08002042 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2043 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002044 .set_pgd = xen_set_pgd_hyper,
2045
Jeremy Fitzhardingeb96229b2009-03-17 13:30:55 -07002046 .alloc_pud = xen_alloc_pmd_init,
2047 .release_pud = xen_release_pmd_init,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002048#endif /* PAGETABLE_LEVELS == 4 */
2049
2050 .activate_mm = xen_activate_mm,
2051 .dup_mmap = xen_dup_mmap,
2052 .exit_mmap = xen_exit_mmap,
2053
2054 .lazy_mode = {
2055 .enter = paravirt_enter_lazy_mmu,
Jeremy Fitzhardingeb407fc52009-02-17 23:46:21 -08002056 .leave = xen_leave_lazy_mmu,
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002057 },
2058
2059 .set_fixmap = xen_set_fixmap,
2060};
2061
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002062void __init xen_init_mmu_ops(void)
2063{
2064 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2065 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2066 pv_mmu_ops = xen_mmu_ops;
Jeremy Fitzhardinged2cb2142010-03-26 15:37:50 -07002067
Jeremy Fitzhardinge98511f32010-09-03 14:55:16 +01002068 memset(dummy_mapping, 0xff, PAGE_SIZE);
Thomas Gleixner030cb6c2009-08-20 14:30:02 +02002069}
Jeremy Fitzhardinge319f3ba2009-01-28 14:35:01 -08002070
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002071/* Protected by xen_reservation_lock. */
2072#define MAX_CONTIG_ORDER 9 /* 2MB */
2073static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2074
2075#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2076static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2077 unsigned long *in_frames,
2078 unsigned long *out_frames)
2079{
2080 int i;
2081 struct multicall_space mcs;
2082
2083 xen_mc_batch();
2084 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2085 mcs = __xen_mc_entry(0);
2086
2087 if (in_frames)
2088 in_frames[i] = virt_to_mfn(vaddr);
2089
2090 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
Konrad Rzeszutek Wilk6eaa4122011-01-18 20:09:41 -05002091 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002092
2093 if (out_frames)
2094 out_frames[i] = virt_to_pfn(vaddr);
2095 }
2096 xen_mc_issue(0);
2097}
2098
2099/*
2100 * Update the pfn-to-mfn mappings for a virtual address range, either to
2101 * point to an array of mfns, or contiguously from a single starting
2102 * mfn.
2103 */
2104static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2105 unsigned long *mfns,
2106 unsigned long first_mfn)
2107{
2108 unsigned i, limit;
2109 unsigned long mfn;
2110
2111 xen_mc_batch();
2112
2113 limit = 1u << order;
2114 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2115 struct multicall_space mcs;
2116 unsigned flags;
2117
2118 mcs = __xen_mc_entry(0);
2119 if (mfns)
2120 mfn = mfns[i];
2121 else
2122 mfn = first_mfn + i;
2123
2124 if (i < (limit - 1))
2125 flags = 0;
2126 else {
2127 if (order == 0)
2128 flags = UVMF_INVLPG | UVMF_ALL;
2129 else
2130 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2131 }
2132
2133 MULTI_update_va_mapping(mcs.mc, vaddr,
2134 mfn_pte(mfn, PAGE_KERNEL), flags);
2135
2136 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2137 }
2138
2139 xen_mc_issue(0);
2140}
2141
2142/*
2143 * Perform the hypercall to exchange a region of our pfns to point to
2144 * memory with the required contiguous alignment. Takes the pfns as
2145 * input, and populates mfns as output.
2146 *
2147 * Returns a success code indicating whether the hypervisor was able to
2148 * satisfy the request or not.
2149 */
2150static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2151 unsigned long *pfns_in,
2152 unsigned long extents_out,
2153 unsigned int order_out,
2154 unsigned long *mfns_out,
2155 unsigned int address_bits)
2156{
2157 long rc;
2158 int success;
2159
2160 struct xen_memory_exchange exchange = {
2161 .in = {
2162 .nr_extents = extents_in,
2163 .extent_order = order_in,
2164 .extent_start = pfns_in,
2165 .domid = DOMID_SELF
2166 },
2167 .out = {
2168 .nr_extents = extents_out,
2169 .extent_order = order_out,
2170 .extent_start = mfns_out,
2171 .address_bits = address_bits,
2172 .domid = DOMID_SELF
2173 }
2174 };
2175
2176 BUG_ON(extents_in << order_in != extents_out << order_out);
2177
2178 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2179 success = (exchange.nr_exchanged == extents_in);
2180
2181 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2182 BUG_ON(success && (rc != 0));
2183
2184 return success;
2185}
2186
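/*
 * Worked example, not part of the original file: the two callers below
 * trade 1UL << order single frames for one extent of the same total
 * size (or vice versa).  For order 9, xen_create_contiguous_region()
 * passes extents_in = 512, order_in = 0, extents_out = 1,
 * order_out = 9, so 512 << 0 == 1 << 9 == 512 frames and the BUG_ON
 * above is satisfied.
 */
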
2187int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2188 unsigned int address_bits)
2189{
2190 unsigned long *in_frames = discontig_frames, out_frame;
2191 unsigned long flags;
2192 int success;
2193
2194 /*
2195 * Currently an auto-translated guest will not perform I/O, nor will
2196 * it require PAE page directories below 4GB. Therefore any calls to
2197 * this function are redundant and can be ignored.
2198 */
2199
2200 if (xen_feature(XENFEAT_auto_translated_physmap))
2201 return 0;
2202
2203 if (unlikely(order > MAX_CONTIG_ORDER))
2204 return -ENOMEM;
2205
2206 memset((void *) vstart, 0, PAGE_SIZE << order);
2207
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002208 spin_lock_irqsave(&xen_reservation_lock, flags);
2209
2210 /* 1. Zap current PTEs, remembering MFNs. */
2211 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2212
2213 /* 2. Get a new contiguous memory extent. */
2214 out_frame = virt_to_pfn(vstart);
2215 success = xen_exchange_memory(1UL << order, 0, in_frames,
2216 1, order, &out_frame,
2217 address_bits);
2218
2219 /* 3. Map the new extent in place of old pages. */
2220 if (success)
2221 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2222 else
2223 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2224
2225 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2226
2227 return success ? 0 : -ENOMEM;
2228}
2229EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2230
2231void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2232{
2233 unsigned long *out_frames = discontig_frames, in_frame;
2234 unsigned long flags;
2235 int success;
2236
2237 if (xen_feature(XENFEAT_auto_translated_physmap))
2238 return;
2239
2240 if (unlikely(order > MAX_CONTIG_ORDER))
2241 return;
2242
2243 memset((void *) vstart, 0, PAGE_SIZE << order);
2244
Alex Nixon08bbc9d2009-02-09 12:05:46 -08002245 spin_lock_irqsave(&xen_reservation_lock, flags);
2246
2247 /* 1. Find start MFN of contiguous extent. */
2248 in_frame = virt_to_mfn(vstart);
2249
2250 /* 2. Zap current PTEs. */
2251 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2252
2253 /* 3. Do the exchange for non-contiguous MFNs. */
2254 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2255 0, out_frames, 0);
2256
2257 /* 4. Map new pages in place of old pages. */
2258 if (success)
2259 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2260 else
2261 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2262
2263 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2264}
2265EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2266
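/*
 * Illustrative usage sketch, not part of the original file: how a
 * hypothetical caller might obtain a machine-contiguous buffer suitable
 * for 32-bit DMA and release it again.  The function name and the
 * choice of order/address_bits are assumptions for the example only.
 */
static int __maybe_unused xen_contig_buffer_example(void)
{
	unsigned int order = 2;		/* four pages */
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);
	int rc;

	if (!vstart)
		return -ENOMEM;

	/* Exchange the backing frames for a contiguous extent below 4GB. */
	rc = xen_create_contiguous_region(vstart, order, 32);
	if (rc) {
		free_pages(vstart, order);
		return rc;
	}

	/* ... use the buffer for DMA ... */

	xen_destroy_contiguous_region(vstart, order);
	free_pages(vstart, order);
	return 0;
}
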
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002267#ifdef CONFIG_XEN_PVHVM
Stefano Stabellini59151002010-06-17 14:22:52 +01002268static void xen_hvm_exit_mmap(struct mm_struct *mm)
2269{
2270 struct xen_hvm_pagetable_dying a;
2271 int rc;
2272
2273 a.domid = DOMID_SELF;
2274 a.gpa = __pa(mm->pgd);
2275 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2276 WARN_ON_ONCE(rc < 0);
2277}
2278
2279static int is_pagetable_dying_supported(void)
2280{
2281 struct xen_hvm_pagetable_dying a;
2282 int rc = 0;
2283
2284 a.domid = DOMID_SELF;
2285 a.gpa = 0x00;
2286 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2287 if (rc < 0) {
2288 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2289 return 0;
2290 }
2291 return 1;
2292}
2293
2294void __init xen_hvm_init_mmu_ops(void)
2295{
2296 if (is_pagetable_dying_supported())
2297 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2298}
Stefano Stabellinica65f9f2010-07-29 14:37:48 +01002299#endif
Stefano Stabellini59151002010-06-17 14:22:52 +01002300
Ian Campbellde1ef202009-05-21 10:09:46 +01002301#define REMAP_BATCH_SIZE 16
2302
2303struct remap_data {
2304 unsigned long mfn;
2305 pgprot_t prot;
2306 struct mmu_update *mmu_update;
2307};
2308
2309static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2310 unsigned long addr, void *data)
2311{
2312 struct remap_data *rmd = data;
2313 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2314
2315 rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
2316 rmd->mmu_update->val = pte_val_ma(pte);
2317 rmd->mmu_update++;
2318
2319 return 0;
2320}
2321
2322int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2323 unsigned long addr,
2324 unsigned long mfn, int nr,
2325 pgprot_t prot, unsigned domid)
2326{
2327 struct remap_data rmd;
2328 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2329 int batch;
2330 unsigned long range;
2331 int err = 0;
2332
2333 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2334
Stefano Stabellinie060e7af2010-11-11 12:37:43 -08002335 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2336 (VM_PFNMAP | VM_RESERVED | VM_IO)));
Ian Campbellde1ef202009-05-21 10:09:46 +01002337
2338 rmd.mfn = mfn;
2339 rmd.prot = prot;
2340
2341 while (nr) {
2342 batch = min(REMAP_BATCH_SIZE, nr);
2343 range = (unsigned long)batch << PAGE_SHIFT;
2344
2345 rmd.mmu_update = mmu_update;
2346 err = apply_to_page_range(vma->vm_mm, addr, range,
2347 remap_area_mfn_pte_fn, &rmd);
2348 if (err)
2349 goto out;
2350
2351 err = -EFAULT;
2352 if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
2353 goto out;
2354
2355 nr -= batch;
2356 addr += range;
2357 }
2358
2359 err = 0;
2360out:
2361
2362 flush_tlb_all();
2363
2364 return err;
2365}
2366EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2367
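/*
 * Illustrative usage sketch, not part of the original file: mapping
 * @nr foreign frames starting at @mfn from domain @domid into a
 * userspace VMA, roughly as a privcmd-style mmap handler would.  The
 * wrapper name is an assumption for the example only; the VMA is
 * expected to already carry VM_PFNMAP | VM_RESERVED | VM_IO, as the
 * BUG_ON above requires.
 */
static int __maybe_unused example_map_foreign_frames(struct vm_area_struct *vma,
						      unsigned long mfn,
						      int nr, unsigned domid)
{
	return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
					  vma->vm_page_prot, domid);
}
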
Jeremy Fitzhardinge994025c2008-08-20 17:02:19 -07002368#ifdef CONFIG_XEN_DEBUG_FS
2369
2370static struct dentry *d_mmu_debug;
2371
2372static int __init xen_mmu_debugfs(void)
2373{
2374 struct dentry *d_xen = xen_init_debugfs();
2375
2376 if (d_xen == NULL)
2377 return -ENOMEM;
2378
2379 d_mmu_debug = debugfs_create_dir("mmu", d_xen);
2380
2381 debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
2382
2383 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
2384 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
2385 &mmu_stats.pgd_update_pinned);
2386 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
2387 &mmu_stats.pgd_update_pinned);
2388
2389 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
2390 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
2391 &mmu_stats.pud_update_pinned);
2392 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
2393 &mmu_stats.pud_update_pinned);
2394
2395 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
2396 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
2397 &mmu_stats.pmd_update_pinned);
2398 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
2399 &mmu_stats.pmd_update_pinned);
2400
2401 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
2402// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
2403// &mmu_stats.pte_update_pinned);
2404 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
2405 &mmu_stats.pte_update_pinned);
2406
2407 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
2408 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2409 &mmu_stats.mmu_update_extended);
2410 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2411 mmu_stats.mmu_update_histo, 20);
2412
2413 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2414 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2415 &mmu_stats.set_pte_at_batched);
2416 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2417 &mmu_stats.set_pte_at_current);
2418 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2419 &mmu_stats.set_pte_at_kernel);
2420
2421 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2422 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2423 &mmu_stats.prot_commit_batched);
2424
2425 return 0;
2426}
2427fs_initcall(xen_mmu_debugfs);
2428
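/*
 * Editorial note, not part of the original file: with
 * CONFIG_XEN_DEBUG_FS enabled the counters registered above appear as
 * individual files under the "mmu" directory created here, inside the
 * directory returned by xen_init_debugfs() (with debugfs conventionally
 * mounted at /sys/kernel/debug).
 */
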
2429#endif /* CONFIG_XEN_DEBUG_FS */